diff --git a/CHANGELOG.md b/CHANGELOG.md index 5acb161bf9..f20276cbd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,65 @@ # Changelog +## [3.9.2-nightly.1](https://github.com/pypeclub/OpenPype/tree/HEAD) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...HEAD) + +**🚀 Enhancements** + +- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) +- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) +- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) +- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) + +**🐛 Bug fixes** + +- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) +- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) + +**🔀 Refactored code** + +- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) + +## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) + +**🚀 Enhancements** + +- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907) +- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897) +- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892) +- Nuke: ExtractReviewSlate can handle more codes and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879) +- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869) + +**🐛 Bug fixes** + +- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904) +- Fixing gap detection in extract 
review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902) +- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899) +- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894) +- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891) +- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885) +- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884) +- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874) + +**🔀 Refactored code** + +- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889) +- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886) + ## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0) **Deprecated:** - AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845) +### 📖 Documentation + +- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878) + **🚀 Enhancements** - General: Subset name filtering in ExtractReview outpus [\#2872](https://github.com/pypeclub/OpenPype/pull/2872) @@ -42,6 +94,7 @@ - Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832) - Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828) - Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827) +- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826) - 
Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825) - Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824) - General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821) @@ -59,7 +112,6 @@ - General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839) - Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829) - Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823) -- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766) ## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07) diff --git a/openpype/__init__.py b/openpype/__init__.py index 7720c9dfb8..8b94b2dc3f 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -75,7 +75,11 @@ def install(): """Install Pype to Avalon.""" from pyblish.lib import MessageHandler from openpype.modules import load_modules - from openpype.pipeline import LegacyCreator + from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + register_inventory_action, + ) from avalon import pipeline # Make sure modules are loaded @@ -91,7 +95,7 @@ def install(): log.info("Registering global plug-ins..") pyblish.register_plugin_path(PUBLISH_PATH) pyblish.register_discovery_filter(filter_pyblish_plugins) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) project_name = os.environ.get("AVALON_PROJECT") @@ -119,9 +123,9 @@ def install(): continue pyblish.register_plugin_path(path) - avalon.register_plugin_path(avalon.Loader, path) + register_loader_plugin_path(path) avalon.register_plugin_path(LegacyCreator, path) - avalon.register_plugin_path(avalon.InventoryAction, path) + register_inventory_action(path) # apply 
monkey patched discover to original one log.info("Patching discovery") @@ -139,10 +143,12 @@ def _on_task_change(): @import_wrapper def uninstall(): """Uninstall Pype from Avalon.""" + from openpype.pipeline import deregister_loader_plugin_path + log.info("Deregistering global plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) pyblish.deregister_discovery_filter(filter_pyblish_plugins) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) log.info("Global plug-ins unregistred") # restore original discover diff --git a/openpype/cli.py b/openpype/cli.py index 155e07dea3..cbeb7fef9b 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -101,7 +101,7 @@ def eventserver(debug, on linux and window service). """ if debug: - os.environ['OPENPYPE_DEBUG'] = "3" + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_eventservercli( ftrack_url, @@ -128,7 +128,7 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): Expect "pype.club" user created on Ftrack. """ if debug: - os.environ['OPENPYPE_DEBUG'] = "3" + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_webpublisher_webservercli( upload_dir=upload_dir, @@ -176,7 +176,7 @@ def publish(debug, paths, targets, gui): More than one path is allowed. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.publish(list(paths), targets, gui) @@ -195,7 +195,7 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None): More than one path is allowed. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.remotepublishfromapp( project, path, host, user, targets=targets ) @@ -215,7 +215,7 @@ def remotepublish(debug, project, path, user=None, targets=None): More than one path is allowed. 
""" if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands.remotepublish(project, path, user, targets=targets) @@ -240,7 +240,7 @@ def texturecopy(debug, project, asset, path): Nothing is written to database. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().texture_copy(project, asset, path) @@ -409,7 +409,7 @@ def syncserver(debug, active_site): var OPENPYPE_LOCAL_ID set to 'active_site'. """ if debug: - os.environ['OPENPYPE_DEBUG'] = '3' + os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().syncserver(active_site) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 41d2417818..bb9affc9b6 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -2,14 +2,20 @@ import os import sys from Qt import QtWidgets +from bson.objectid import ObjectId import pyblish.api import avalon.api -from avalon import io, pipeline +from avalon import io from openpype import lib from openpype.api import Logger -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) import openpype.hosts.aftereffects from openpype.lib import register_event_callback @@ -25,7 +31,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") def check_inventory(): @@ -38,7 +43,7 @@ def check_inventory(): representation = container['representation'] representation_doc = io.find_one( { - "_id": io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -67,7 +72,7 @@ def install(): pyblish.api.register_host("aftereffects") 
pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) @@ -80,7 +85,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) @@ -145,7 +150,7 @@ def containerise(name, """ data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": name, "namespace": namespace, "loader": str(loader), diff --git a/openpype/hosts/aftereffects/api/plugin.py b/openpype/hosts/aftereffects/api/plugin.py index fbe07663dd..29705cc5be 100644 --- a/openpype/hosts/aftereffects/api/plugin.py +++ b/openpype/hosts/aftereffects/api/plugin.py @@ -1,9 +1,8 @@ -import avalon.api +from openpype.pipeline import LoaderPlugin from .launch_logic import get_stub -class AfterEffectsLoader(avalon.api.Loader): +class AfterEffectsLoader(LoaderPlugin): @staticmethod def get_stub(): return get_stub() - diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py index 04c7834d8f..5a8f86ead5 100644 --- a/openpype/hosts/aftereffects/api/workio.py +++ b/openpype/hosts/aftereffects/api/workio.py @@ -1,8 +1,8 @@ """Host API required Work Files tool""" import os +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from .launch_logic import get_stub -from avalon import api def _active_document(): @@ -14,7 +14,7 @@ def _active_document(): def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["aftereffects"] + return HOST_WORKFILE_EXTENSIONS["aftereffects"] def has_unsaved_changes(): diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py index 
1a2d6fc432..be43cae44e 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_background.py +++ b/openpype/hosts/aftereffects/plugins/load/load_background.py @@ -1,11 +1,10 @@ import re -import avalon.api - from openpype.lib import ( get_background_layers, get_unique_layer_name ) +from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise @@ -78,7 +77,7 @@ class BackgroundLoader(AfterEffectsLoader): else: # switching version - keep same name comp_name = container["namespace"] - path = avalon.api.get_representation_path(representation) + path = get_representation_path(representation) layers = get_background_layers(path) comp = stub.reload_background(container["members"][1], diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py index 9dbbf7aae1..9eb9e80a2c 100644 --- a/openpype/hosts/aftereffects/plugins/load/load_file.py +++ b/openpype/hosts/aftereffects/plugins/load/load_file.py @@ -1,8 +1,8 @@ import re -import avalon.api from openpype import lib +from openpype.pipeline import get_representation_path from openpype.hosts.aftereffects.api import ( AfterEffectsLoader, containerise @@ -92,7 +92,7 @@ class FileLoader(AfterEffectsLoader): "{}_{}".format(context["asset"], context["subset"])) else: # switching version - keep same name layer_name = container["namespace"] - path = avalon.api.get_representation_path(representation) + path = get_representation_path(representation) # with aftereffects.maintained_selection(): # TODO stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name) stub.imprint( diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index 3069c3e1c9..29d6d356c8 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -328,7 +328,6 @@ class LaunchWorkFiles(LaunchQtApp): result = super().execute(context) self._window.set_context({ "asset": 
avalon.api.Session["AVALON_ASSET"], - "silo": avalon.api.Session["AVALON_SILO"], "task": avalon.api.Session["AVALON_TASK"] }) return result diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index efa08ba59e..8c580cf214 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -12,9 +12,13 @@ from . import ops import pyblish.api import avalon.api from avalon import io, schema -from avalon.pipeline import AVALON_CONTAINER_ID -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from openpype.api import Logger from openpype.lib import ( register_event_callback, @@ -27,7 +31,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") ORIGINAL_EXCEPTHOOK = sys.excepthook @@ -50,7 +53,7 @@ def install(): pyblish.api.register_host("blender") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - avalon.api.register_plugin_path(avalon.api.Loader, str(LOAD_PATH)) + register_loader_plugin_path(str(LOAD_PATH)) avalon.api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) lib.append_user_scripts() @@ -72,7 +75,7 @@ def uninstall(): pyblish.api.deregister_host("blender") pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - avalon.api.deregister_plugin_path(avalon.api.Loader, str(LOAD_PATH)) + deregister_loader_plugin_path(str(LOAD_PATH)) avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) if not IS_HEADLESS: diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py index 20d1e4c8db..3207f543b7 100644 --- a/openpype/hosts/blender/api/plugin.py +++ b/openpype/hosts/blender/api/plugin.py @@ -5,8 +5,10 @@ from typing import 
Dict, List, Optional import bpy -import avalon.api -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from .pipeline import AVALON_CONTAINERS from .ops import ( MainThreadItem, @@ -145,13 +147,13 @@ class Creator(LegacyCreator): return collection -class Loader(avalon.api.Loader): +class Loader(LoaderPlugin): """Base class for Loader plug-ins.""" hosts = ["blender"] -class AssetLoader(avalon.api.Loader): +class AssetLoader(LoaderPlugin): """A basic AssetLoader for Blender This will implement the basic logic for linking/appending assets diff --git a/openpype/hosts/blender/api/workio.py b/openpype/hosts/blender/api/workio.py index fd68761982..5eb9f82999 100644 --- a/openpype/hosts/blender/api/workio.py +++ b/openpype/hosts/blender/api/workio.py @@ -4,7 +4,8 @@ from pathlib import Path from typing import List, Optional import bpy -from avalon import api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS class OpenFileCacher: @@ -77,7 +78,7 @@ def has_unsaved_changes() -> bool: def file_extensions() -> List[str]: """Return the supported file extensions for Blender scene files.""" - return api.HOST_WORKFILE_EXTENSIONS["blender"] + return HOST_WORKFILE_EXTENSIONS["blender"] def work_root(session: dict) -> str: diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py index 07800521c9..1b2e800769 100644 --- a/openpype/hosts/blender/plugins/load/load_abc.py +++ b/openpype/hosts/blender/plugins/load/load_abc.py @@ -6,11 +6,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) + from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) from openpype.hosts.blender.api import plugin, lib @@ -178,7 +181,7 @@ class CacheModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] 
asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_action.py b/openpype/hosts/blender/plugins/load/load_action.py index a9d8522220..3c8fe988f0 100644 --- a/openpype/hosts/blender/plugins/load/load_action.py +++ b/openpype/hosts/blender/plugins/load/load_action.py @@ -5,9 +5,13 @@ from pathlib import Path from pprint import pformat from typing import Dict, List, Optional -from avalon import api, blender import bpy +from openpype.pipeline import get_representation_path import openpype.hosts.blender.api.plugin +from openpype.hosts.blender.api.pipeline import ( + containerise_existing, + AVALON_PROPERTY, +) logger = logging.getLogger("openpype").getChild("blender").getChild("load_action") @@ -49,7 +53,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): container = bpy.data.collections.new(lib_container) container.name = container_name - blender.pipeline.containerise_existing( + containerise_existing( container, name, namespace, @@ -57,8 +61,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): self.__class__.__name__, ) - container_metadata = container.get( - blender.pipeline.AVALON_PROPERTY) + container_metadata = container.get(AVALON_PROPERTY) container_metadata["libpath"] = libpath container_metadata["lib_container"] = lib_container @@ -90,16 +93,16 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): anim_data.action.make_local() - if not obj.get(blender.pipeline.AVALON_PROPERTY): + if not obj.get(AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + obj[AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = obj[AVALON_PROPERTY] avalon_info.update({"container_name": container_name}) objects_list.append(obj) - 
animation_container.pop(blender.pipeline.AVALON_PROPERTY) + animation_container.pop(AVALON_PROPERTY) # Save the list of objects in the metadata container container_metadata["objects"] = objects_list @@ -128,7 +131,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): container["objectName"] ) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() logger.info( @@ -153,8 +156,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): f"Unsupported file: {libpath}" ) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) + collection_metadata = collection.get(AVALON_PROPERTY) collection_libpath = collection_metadata["libpath"] normalized_collection_libpath = ( @@ -225,16 +227,16 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): strip.action = anim_data.action strip.action_frame_end = anim_data.action.frame_range[1] - if not obj.get(blender.pipeline.AVALON_PROPERTY): + if not obj.get(AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + obj[AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = obj[AVALON_PROPERTY] avalon_info.update({"container_name": collection.name}) objects_list.append(obj) - anim_container.pop(blender.pipeline.AVALON_PROPERTY) + anim_container.pop(AVALON_PROPERTY) # Save the list of objects in the metadata container collection_metadata["objects"] = objects_list @@ -266,8 +268,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader): "Nested collections are not supported." 
) - collection_metadata = collection.get( - blender.pipeline.AVALON_PROPERTY) + collection_metadata = collection.get(AVALON_PROPERTY) objects = collection_metadata["objects"] lib_container = collection_metadata["lib_container"] diff --git a/openpype/hosts/blender/plugins/load/load_audio.py b/openpype/hosts/blender/plugins/load/load_audio.py index e065150c15..3f4fcc17de 100644 --- a/openpype/hosts/blender/plugins/load/load_audio.py +++ b/openpype/hosts/blender/plugins/load/load_audio.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -102,7 +104,7 @@ class AudioLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) self.log.info( "Container: %s\nRepresentation: %s", diff --git a/openpype/hosts/blender/plugins/load/load_camera_blend.py b/openpype/hosts/blender/plugins/load/load_camera_blend.py index 61955f124d..f00027f0b4 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_blend.py +++ b/openpype/hosts/blender/plugins/load/load_camera_blend.py @@ -7,12 +7,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) logger = logging.getLogger("openpype").getChild( @@ -155,7 +157,7 @@ class BlendCameraLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = 
Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_camera_fbx.py b/openpype/hosts/blender/plugins/load/load_camera_fbx.py index 175ddacf9f..97f844e610 100644 --- a/openpype/hosts/blender/plugins/load/load_camera_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_camera_fbx.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -143,7 +145,7 @@ class FbxCameraLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_fbx.py b/openpype/hosts/blender/plugins/load/load_fbx.py index c6e6af5592..ee2e7d175c 100644 --- a/openpype/hosts/blender/plugins/load/load_fbx.py +++ b/openpype/hosts/blender/plugins/load/load_fbx.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin, lib from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -187,7 +189,7 @@ class FbxModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = 
libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_layout_blend.py b/openpype/hosts/blender/plugins/load/load_layout_blend.py index 7f8ae610c6..cf8e89ed1f 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_blend.py +++ b/openpype/hosts/blender/plugins/load/load_layout_blend.py @@ -6,14 +6,16 @@ from typing import Dict, List, Optional import bpy -from avalon import api from openpype import lib -from openpype.pipeline import legacy_create +from openpype.pipeline import ( + legacy_create, + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -309,7 +311,7 @@ class BlendLayoutLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_layout_json.py b/openpype/hosts/blender/plugins/load/load_layout_json.py index 5b5f9ab83d..a0580af4a0 100644 --- a/openpype/hosts/blender/plugins/load/load_layout_json.py +++ b/openpype/hosts/blender/plugins/load/load_layout_json.py @@ -7,12 +7,18 @@ from typing import Dict, Optional import bpy -from avalon import api +from openpype.pipeline import ( + discover_loader_plugins, + remove_container, + load_container, + get_representation_path, + loaders_from_representation, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api.pipeline import ( AVALON_INSTANCES, AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) from openpype.hosts.blender.api import plugin @@ -33,7 +39,7 @@ class JsonLayoutLoader(plugin.AssetLoader): objects = list(asset_group.children) for obj in objects: - api.remove(obj.get(AVALON_PROPERTY)) + 
remove_container(obj.get(AVALON_PROPERTY)) def _remove_animation_instances(self, asset_group): instances = bpy.data.collections.get(AVALON_INSTANCES) @@ -66,13 +72,13 @@ class JsonLayoutLoader(plugin.AssetLoader): with open(libpath, "r") as fp: data = json.load(fp) - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for element in data: reference = element.get('reference') family = element.get('family') - loaders = api.loaders_from_representation(all_loaders, reference) + loaders = loaders_from_representation(all_loaders, reference) loader = self._get_loader(loaders, family) if not loader: @@ -102,7 +108,7 @@ class JsonLayoutLoader(plugin.AssetLoader): # at this time it will not return anything. The assets will be # loaded in the next Blender cycle, so we use the options to # set the transform, parent and assign the action, if there is one. - api.load( + load_container( loader, reference, namespace=instance_name, @@ -188,7 +194,7 @@ class JsonLayoutLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_look.py b/openpype/hosts/blender/plugins/load/load_look.py index 066ec0101b..70d1b95f02 100644 --- a/openpype/hosts/blender/plugins/load/load_look.py +++ b/openpype/hosts/blender/plugins/load/load_look.py @@ -8,7 +8,7 @@ import os import json import bpy -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( containerise_existing, @@ -140,7 +140,7 @@ class BlendLookLoader(plugin.AssetLoader): def update(self, container: Dict, representation: Dict): collection = bpy.data.collections.get(container["objectName"]) - libpath = 
Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index 04ece0b338..0a5d98ffa0 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -6,12 +6,14 @@ from typing import Dict, List, Optional import bpy -from avalon import api +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.blender.api import plugin from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -195,7 +197,7 @@ class BlendModelLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/load/load_rig.py b/openpype/hosts/blender/plugins/load/load_rig.py index eacabd3447..4dfa96167f 100644 --- a/openpype/hosts/blender/plugins/load/load_rig.py +++ b/openpype/hosts/blender/plugins/load/load_rig.py @@ -6,15 +6,19 @@ from typing import Dict, List, Optional import bpy -from avalon import api -from avalon.blender import lib as avalon_lib from openpype import lib -from openpype.pipeline import legacy_create -from openpype.hosts.blender.api import plugin +from openpype.pipeline import ( + legacy_create, + get_representation_path, + AVALON_CONTAINER_ID, +) +from openpype.hosts.blender.api import ( + plugin, + get_selection, +) from openpype.hosts.blender.api.pipeline import ( AVALON_CONTAINERS, AVALON_PROPERTY, - AVALON_CONTAINER_ID ) @@ -263,7 +267,7 @@ class BlendRigLoader(plugin.AssetLoader): if anim_file: bpy.ops.import_scene.fbx(filepath=anim_file, 
anim_offset=0.0) - imported = avalon_lib.get_selection() + imported = get_selection() armature = [ o for o in asset_group.children if o.type == 'ARMATURE'][0] @@ -307,7 +311,7 @@ class BlendRigLoader(plugin.AssetLoader): """ object_name = container["objectName"] asset_group = bpy.data.objects.get(object_name) - libpath = Path(api.get_representation_path(representation)) + libpath = Path(get_representation_path(representation)) extension = libpath.suffix.lower() self.log.info( diff --git a/openpype/hosts/blender/plugins/publish/extract_layout.py b/openpype/hosts/blender/plugins/publish/extract_layout.py index cc7c90f4c8..b78a193d81 100644 --- a/openpype/hosts/blender/plugins/publish/extract_layout.py +++ b/openpype/hosts/blender/plugins/publish/extract_layout.py @@ -1,6 +1,8 @@ import os import json +from bson.objectid import ObjectId + import bpy import bpy_extras import bpy_extras.anim_utils @@ -140,7 +142,7 @@ class ExtractLayout(openpype.api.Extractor): blend = io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "blend" }, projection={"_id": True}) @@ -151,7 +153,7 @@ class ExtractLayout(openpype.api.Extractor): fbx = io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "fbx" }, projection={"_id": True}) @@ -162,7 +164,7 @@ class ExtractLayout(openpype.api.Extractor): abc = io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "abc" }, projection={"_id": True}) diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index 56bbadd2fc..f210c27f87 100644 --- a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -68,7 +68,8 @@ from .workio import ( ) from .render_utils import ( export_clip, - get_preset_path_by_xml_name + get_preset_path_by_xml_name, + modify_preset_file ) __all__ = [ @@ -140,5 +141,6 @@ __all__ = [ # render 
utils "export_clip", - "get_preset_path_by_xml_name" + "get_preset_path_by_xml_name", + "modify_preset_file" ] diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index f802cf160b..ca3f38c1bc 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -4,10 +4,15 @@ Basic avalon integration import os import contextlib from avalon import api as avalon -from avalon.pipeline import AVALON_CONTAINER_ID from pyblish import api as pyblish + from openpype.api import Logger -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from .lib import ( set_segment_data_marker, set_publish_attribute, @@ -22,7 +27,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") AVALON_CONTAINERS = "AVALON_CONTAINERS" @@ -30,12 +34,10 @@ log = Logger.get_logger(__name__) def install(): - pyblish.register_host("flame") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) log.info("OpenPype Flame plug-ins registred ...") # register callback for switching publishable @@ -43,14 +45,14 @@ def install(): log.info("OpenPype Flame host installed ...") + def uninstall(): pyblish.deregister_host("flame") log.info("Deregistering Flame plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) - 
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 5221701a2f..4c9d3c5383 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -7,9 +7,11 @@ import six import qargparse from Qt import QtWidgets, QtCore import openpype.api as openpype -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from openpype import style -import avalon.api as avalon from . import ( lib as flib, pipeline as fpipeline, @@ -660,7 +662,7 @@ class PublishableClip: # Publishing plugin functions # Loader plugin functions -class ClipLoader(avalon.Loader): +class ClipLoader(LoaderPlugin): """A basic clip loader for Flame This will implement the basic behavior for a loader to inherit from that diff --git a/openpype/hosts/flame/api/render_utils.py b/openpype/hosts/flame/api/render_utils.py index 1b086646cc..473fb2f985 100644 --- a/openpype/hosts/flame/api/render_utils.py +++ b/openpype/hosts/flame/api/render_utils.py @@ -1,4 +1,5 @@ import os +from xml.etree import ElementTree as ET def export_clip(export_path, clip, preset_path, **kwargs): @@ -123,3 +124,29 @@ def get_preset_path_by_xml_name(xml_preset_name): # if nothing found then return False return False + + +def modify_preset_file(xml_path, staging_dir, data): + """Modify xml preset with input data + + Args: + xml_path (str ): path for input xml preset + staging_dir (str): staging dir path + data (dict): data where key is xmlTag and value as string + + Returns: + str: _description_ + """ + # create temp path + dirname, basename = os.path.split(xml_path) + temp_path = os.path.join(staging_dir, basename) + + # change xml following data keys + with open(xml_path, "r") as datafile: + tree = ET.parse(datafile) + for key, 
value in data.items(): + for element in tree.findall(".//{}".format(key)): + element.text = str(value) + tree.write(temp_path) + + return temp_path diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index c864399608..ee906c2608 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -420,13 +420,20 @@ class WireTapCom(object): RuntimeError: Not able to set colorspace policy """ color_policy = color_policy or "Legacy" + + # check if the colour policy in custom dir + if not os.path.exists(color_policy): + color_policy = "/syncolor/policies/Autodesk/{}".format( + color_policy) + + # create arguments project_colorspace_cmd = [ os.path.join( self.wiretap_tools_dir, "wiretap_duplicate_node" ), "-s", - "/syncolor/policies/Autodesk/{}".format(color_policy), + color_policy, "-n", "/projects/{}/syncolor".format(project_name) ] diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index 0d63b0d926..ad2b0dc897 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -73,7 +73,7 @@ class FlamePrelaunch(PreLaunchHook): "FrameWidth": int(width), "FrameHeight": int(height), "AspectRatio": float((width / height) * _db_p_data["pixelAspect"]), - "FrameRate": "{} fps".format(fps), + "FrameRate": self._get_flame_fps(fps), "FrameDepth": str(imageio_flame["project"]["frameDepth"]), "FieldDominance": str(imageio_flame["project"]["fieldDominance"]) } @@ -101,6 +101,28 @@ class FlamePrelaunch(PreLaunchHook): self.launch_context.launch_args.extend(app_arguments) + def _get_flame_fps(self, fps_num): + fps_table = { + float(23.976): "23.976 fps", + int(25): "25 fps", + int(24): "24 fps", + float(29.97): "29.97 fps DF", + int(30): "30 fps", + int(50): "50 fps", + float(59.94): "59.94 fps DF", + int(60): "60 fps" + } + + match_key = min(fps_table.keys(), key=lambda x: 
abs(x - fps_num)) + + try: + return fps_table[match_key] + except KeyError as msg: + raise KeyError(( + "Missing FPS key in conversion table. " + "Following keys are available: {}".format(fps_table.keys()) + )) from msg + def _add_pythonpath(self): pythonpath = self.launch_context.env.get("PYTHONPATH") diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 8ba01d6937..8980f72cb8 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -172,7 +172,7 @@ class LoadClip(opfapi.ClipLoader): # version_name = version.get("name", None) # colorspace = version_data.get("colorspace", None) # object_name = "{}_{}".format(name, namespace) - # file = api.get_representation_path(representation).replace("\\", "/") + # file = get_representation_path(representation).replace("\\", "/") # clip = track_item.source() # # reconnect media to new path diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 6424bce3bc..70340ad7a2 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -1,3 +1,4 @@ +import re import pyblish import openpype import openpype.hosts.flame.api as opfapi @@ -6,6 +7,10 @@ from openpype.hosts.flame.otio import flame_export # # developer reload modules from pprint import pformat +# constatns +NUM_PATERN = re.compile(r"([0-9\.]+)") +TXT_PATERN = re.compile(r"([a-zA-Z]+)") + class CollectTimelineInstances(pyblish.api.ContextPlugin): """Collect all Timeline segment selection.""" @@ -16,6 +21,16 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): audio_track_items = [] + # TODO: add to settings + # settings + xml_preset_attrs_from_comments = { + "width": "number", + "height": "number", + "pixelRatio": "float", + "resizeType": "string", + 
"resizeFilter": "string" + } + def process(self, context): project = context.data["flameProject"] sequence = context.data["flameSequence"] @@ -26,6 +41,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # process all sellected with opfapi.maintained_segment_selection(sequence) as segments: for segment in segments: + comment_attributes = self._get_comment_attributes(segment) + self.log.debug("_ comment_attributes: {}".format( + pformat(comment_attributes))) + clip_data = opfapi.get_segment_attributes(segment) clip_name = clip_data["segment_name"] self.log.debug("clip_name: {}".format(clip_name)) @@ -101,6 +120,9 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): # add resolution self._get_resolution_to_data(inst_data, context) + # add comment attributes if any + inst_data.update(comment_attributes) + # create instance instance = context.create_instance(**inst_data) @@ -126,6 +148,94 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): if marker_data.get("reviewTrack") is not None: instance.data["reviewAudio"] = True + def _get_comment_attributes(self, segment): + comment = segment.comment.get_value() + + # try to find attributes + attributes = { + "xml_overrides": { + "pixelRatio": 1.00} + } + # search for `:` + for split in self._split_comments(comment): + # make sure we ignore if not `:` in key + if ":" not in split: + continue + + self._get_xml_preset_attrs( + attributes, split) + + # add xml overides resolution to instance data + xml_overrides = attributes["xml_overrides"] + if xml_overrides.get("width"): + attributes.update({ + "resolutionWidth": xml_overrides["width"], + "resolutionHeight": xml_overrides["height"], + "pixelAspect": xml_overrides["pixelRatio"] + }) + + return attributes + + def _get_xml_preset_attrs(self, attributes, split): + + # split to key and value + key, value = split.split(":") + + for a_name, a_type in self.xml_preset_attrs_from_comments.items(): + # exclude all not related attributes + if 
a_name.lower() not in key.lower(): + continue + + # get pattern defined by type + pattern = TXT_PATERN + if a_type in ("number" , "float"): + pattern = NUM_PATERN + + res_goup = pattern.findall(value) + + # raise if nothing is found as it is not correctly defined + if not res_goup: + raise ValueError(( + "Value for `{}` attribute is not " + "set correctly: `{}`").format(a_name, split)) + + if "string" in a_type: + _value = res_goup[0] + if "float" in a_type: + _value = float(res_goup[0]) + if "number" in a_type: + _value = int(res_goup[0]) + + attributes["xml_overrides"][a_name] = _value + + # condition for resolution in key + if "resolution" in key.lower(): + res_goup = NUM_PATERN.findall(value) + # check if axpect was also defined + # 1920x1080x1.5 + aspect = res_goup[2] if len(res_goup) > 2 else 1 + + width = int(res_goup[0]) + height = int(res_goup[1]) + pixel_ratio = float(aspect) + attributes["xml_overrides"].update({ + "width": width, + "height": height, + "pixelRatio": pixel_ratio + }) + + def _split_comments(self, comment_string): + # first split comment by comma + split_comments = [] + if "," in comment_string: + split_comments.extend(comment_string.split(",")) + elif ";" in comment_string: + split_comments.extend(comment_string.split(";")) + else: + split_comments.append(comment_string) + + return split_comments + def _get_head_tail(self, clip_data, first_frame): # calculate head and tail with forward compatibility head = clip_data.get("segment_head") diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index db85bede85..32f6b9508f 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -1,6 +1,7 @@ import os from pprint import pformat from copy import deepcopy + import pyblish.api import openpype.api from openpype.hosts.flame import api as opfapi @@ -22,6 +23,8 @@ 
class ExtractSubsetResources(openpype.api.Extractor): "ext": "jpg", "xml_preset_file": "Jpeg (8-bit).xml", "xml_preset_dir": "", + "export_type": "File Sequence", + "ignore_comment_attrs": True, "colorspace_out": "Output - sRGB", "representation_add_range": False, "representation_tags": ["thumbnail"] @@ -30,6 +33,8 @@ class ExtractSubsetResources(openpype.api.Extractor): "ext": "mov", "xml_preset_file": "Apple iPad (1920x1080).xml", "xml_preset_dir": "", + "export_type": "Movie", + "ignore_comment_attrs": True, "colorspace_out": "Output - Rec.709", "representation_add_range": True, "representation_tags": [ @@ -54,21 +59,35 @@ class ExtractSubsetResources(openpype.api.Extractor): ): instance.data["representations"] = [] - frame_start = instance.data["frameStart"] - handle_start = instance.data["handleStart"] - frame_start_handle = frame_start - handle_start - source_first_frame = instance.data["sourceFirstFrame"] - source_start_handles = instance.data["sourceStartH"] - source_end_handles = instance.data["sourceEndH"] - source_duration_handles = ( - source_end_handles - source_start_handles) + 1 - + # flame objects + segment = instance.data["item"] + sequence_clip = instance.context.data["flameSequence"] clip_data = instance.data["flameSourceClip"] clip = clip_data["PyClip"] - in_mark = (source_start_handles - source_first_frame) + 1 - out_mark = in_mark + source_duration_handles + # segment's parent track name + s_track_name = segment.parent.name.get_value() + # get configured workfile frame start/end (handles excluded) + frame_start = instance.data["frameStart"] + # get media source first frame + source_first_frame = instance.data["sourceFirstFrame"] + + # get timeline in/out of segment + clip_in = instance.data["clipIn"] + clip_out = instance.data["clipOut"] + + # get handles value - take only the max from both + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleStart"] + handles = max(handle_start, handle_end) + + # get media source 
range with handles + source_end_handles = instance.data["sourceEndH"] + source_start_handles = instance.data["sourceStartH"] + source_end_handles = instance.data["sourceEndH"] + + # create staging dir path staging_dir = self.staging_dir(instance) # add default preset type for thumbnail and reviewable video @@ -77,15 +96,61 @@ class ExtractSubsetResources(openpype.api.Extractor): export_presets = deepcopy(self.default_presets) export_presets.update(self.export_presets_mapping) - # with maintained duplication loop all presets - with opfapi.maintained_object_duplication(clip) as duplclip: - # loop all preset names and - for unique_name, preset_config in export_presets.items(): + # loop all preset names and + for unique_name, preset_config in export_presets.items(): + modify_xml_data = {} + + # get all presets attributes + preset_file = preset_config["xml_preset_file"] + preset_dir = preset_config["xml_preset_dir"] + export_type = preset_config["export_type"] + repre_tags = preset_config["representation_tags"] + ignore_comment_attrs = preset_config["ignore_comment_attrs"] + color_out = preset_config["colorspace_out"] + + # get frame range with handles for representation range + frame_start_handle = frame_start - handle_start + source_duration_handles = ( + source_end_handles - source_start_handles) + 1 + + # define in/out marks + in_mark = (source_start_handles - source_first_frame) + 1 + out_mark = in_mark + source_duration_handles + + # by default export source clips + exporting_clip = clip + + if export_type == "Sequence Publish": + # change export clip to sequence + exporting_clip = sequence_clip + + # change in/out marks to timeline in/out + in_mark = clip_in + out_mark = clip_out + + # add xml tags modifications + modify_xml_data.update({ + "exportHandles": True, + "nbHandles": handles, + "startFrame": frame_start + }) + + if not ignore_comment_attrs: + # add any xml overrides collected form segment.comment + modify_xml_data.update(instance.data["xml_overrides"]) 
+ + self.log.debug("__ modify_xml_data: {}".format(pformat( + modify_xml_data + ))) + + # with maintained duplication loop all presets + with opfapi.maintained_object_duplication( + exporting_clip) as duplclip: kwargs = {} - preset_file = preset_config["xml_preset_file"] - preset_dir = preset_config["xml_preset_dir"] - repre_tags = preset_config["representation_tags"] - color_out = preset_config["colorspace_out"] + + if export_type == "Sequence Publish": + # only keep visible layer where instance segment is child + self.hide_other_tracks(duplclip, s_track_name) # validate xml preset file is filled if preset_file == "": @@ -108,10 +173,13 @@ class ExtractSubsetResources(openpype.api.Extractor): ) # create preset path - preset_path = str(os.path.join( + preset_orig_xml_path = str(os.path.join( preset_dir, preset_file )) + preset_path = opfapi.modify_preset_file( + preset_orig_xml_path, staging_dir, modify_xml_data) + # define kwargs based on preset type if "thumbnail" in unique_name: kwargs["thumb_frame_number"] = in_mark + ( @@ -122,6 +190,7 @@ class ExtractSubsetResources(openpype.api.Extractor): "out_mark": out_mark }) + # get and make export dir paths export_dir_path = str(os.path.join( staging_dir, unique_name )) @@ -132,6 +201,7 @@ class ExtractSubsetResources(openpype.api.Extractor): export_dir_path, duplclip, preset_path, **kwargs) extension = preset_config["ext"] + # create representation data representation_data = { "name": unique_name, @@ -159,7 +229,12 @@ class ExtractSubsetResources(openpype.api.Extractor): # add files to represetation but add # imagesequence as list if ( - "movie_file" in preset_path + # first check if path in files is not mov extension + [ + f for f in files + if os.path.splitext(f)[-1] == ".mov" + ] + # then try if thumbnail is not in unique name or unique_name == "thumbnail" ): representation_data["files"] = files.pop() @@ -246,3 +321,19 @@ class ExtractSubsetResources(openpype.api.Extractor): ) return new_stage_dir, new_files_list + 
+ def hide_other_tracks(self, sequence_clip, track_name): + """Helper method used only if sequence clip is used + + Args: + sequence_clip (flame.Clip): sequence clip + track_name (str): track name + """ + # create otio tracks and clips + for ver in sequence_clip.versions: + for track in ver.tracks: + if len(track.segments) == 0 and track.hidden: + continue + + if track.name.get_value() != track_name: + track.hidden = True diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index 5d97f83032..f7a2360bfa 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -3,10 +3,11 @@ import sys import re import contextlib +from bson.objectid import ObjectId from Qt import QtGui -import avalon.api from avalon import io +from openpype.pipeline import switch_container from .pipeline import get_current_comp, comp_lock_and_undo_chunk self = sys.modules[__name__] @@ -92,7 +93,7 @@ def switch_item(container, # Collect any of current asset, subset and representation if not provided # so we can use the original name from those. 
if any(not x for x in [asset_name, subset_name, representation_name]): - _id = io.ObjectId(container["representation"]) + _id = ObjectId(container["representation"]) representation = io.find_one({"type": "representation", "_id": _id}) version, subset, asset, project = io.parenthood(representation) @@ -142,7 +143,7 @@ def switch_item(container, assert representation, ("Could not find representation in the database " "with the name '%s'" % representation_name) - avalon.api.switch(container, representation) + switch_container(container, representation) return representation diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 5ac56fcbed..c9cd76770a 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -8,10 +8,16 @@ import contextlib import pyblish.api import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID from openpype.api import Logger -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + register_inventory_action_path, + deregister_inventory_action_path, + AVALON_CONTAINER_ID, +) import openpype.hosts.fusion log = Logger().get_logger(__name__) @@ -63,9 +69,9 @@ def install(): pyblish.api.register_plugin_path(PUBLISH_PATH) log.info("Registering Fusion plug-ins..") - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_inventory_action_path(INVENTORY_PATH) pyblish.api.register_callback( "instanceToggled", on_pyblish_instance_toggled @@ -87,11 +93,9 @@ def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) log.info("Deregistering Fusion plug-ins..") - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) 
avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.deregister_plugin_path( - avalon.api.InventoryAction, INVENTORY_PATH - ) + deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( "instanceToggled", on_pyblish_instance_toggled diff --git a/openpype/hosts/fusion/api/workio.py b/openpype/hosts/fusion/api/workio.py index ec9ac7481a..a1710c6e3a 100644 --- a/openpype/hosts/fusion/api/workio.py +++ b/openpype/hosts/fusion/api/workio.py @@ -1,12 +1,14 @@ """Host API required Work Files tool""" import sys import os -from avalon import api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + from .pipeline import get_current_comp def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["fusion"] + return HOST_WORKFILE_EXTENSIONS["fusion"] def has_unsaved_changes(): diff --git a/openpype/hosts/fusion/plugins/inventory/select_containers.py b/openpype/hosts/fusion/plugins/inventory/select_containers.py index 294c134505..d554b73a5b 100644 --- a/openpype/hosts/fusion/plugins/inventory/select_containers.py +++ b/openpype/hosts/fusion/plugins/inventory/select_containers.py @@ -1,7 +1,7 @@ -from avalon import api +from openpype.pipeline import InventoryAction -class FusionSelectContainers(api.InventoryAction): +class FusionSelectContainers(InventoryAction): label = "Select Containers" icon = "mouse-pointer" diff --git a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py index 2f5ae4d241..c7530ce674 100644 --- a/openpype/hosts/fusion/plugins/inventory/set_tool_color.py +++ b/openpype/hosts/fusion/plugins/inventory/set_tool_color.py @@ -1,6 +1,6 @@ -from avalon import api from Qt import QtGui, QtWidgets +from openpype.pipeline import InventoryAction from openpype import style from openpype.hosts.fusion.api import ( get_current_comp, @@ -8,7 +8,7 @@ from openpype.hosts.fusion.api import ( ) -class FusionSetToolColor(api.InventoryAction): +class 
FusionSetToolColor(InventoryAction): """Update the color of the selected tools""" label = "Set Tool Color" diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py index 6af99e4c56..bc59cec77f 100644 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ b/openpype/hosts/fusion/plugins/load/actions.py @@ -2,10 +2,10 @@ """ -from avalon import api +from openpype.pipeline import load -class FusionSetFrameRangeLoader(api.Loader): +class FusionSetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -39,7 +39,7 @@ class FusionSetFrameRangeLoader(api.Loader): lib.update_frame_range(start, end) -class FusionSetFrameRangeWithHandlesLoader(api.Loader): +class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index ea118585bf..075820de35 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -1,8 +1,12 @@ import os import contextlib -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.fusion.api import ( imprint_container, get_current_comp, @@ -117,7 +121,7 @@ def loader_shift(loader, frame, relative=True): return int(shift) -class FusionLoadSequence(api.Loader): +class FusionLoadSequence(load.LoaderPlugin): """Load image sequence into Fusion""" families = ["imagesequence", "review", "render"] @@ -204,7 +208,7 @@ class FusionLoadSequence(api.Loader): assert tool.ID == "Loader", "Must be Loader" comp = tool.Comp() - root = os.path.dirname(api.get_representation_path(representation)) + root = os.path.dirname(get_representation_path(representation)) path = self._get_first_image(root) 
# Get start frame from version data diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index a8d182736a..e8d354e1e6 100644 --- a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -575,7 +575,7 @@ replace_files = """function %s_replace_files(args) """ % (signature, signature) -class ImageSequenceLoader(api.Loader): +class ImageSequenceLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. """ diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js index 5a3fe9ce82..6a403fa65e 100644 --- a/openpype/hosts/harmony/api/TB_sceneOpened.js +++ b/openpype/hosts/harmony/api/TB_sceneOpened.js @@ -272,8 +272,8 @@ function Client() { app.avalonClient.send( { - 'module': 'avalon.api', - 'method': 'emit', + 'module': 'openpype.lib', + 'method': 'emit_event', 'args': ['application.launched'] }, false); }; diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 928bf926ff..420e9720db 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -2,15 +2,20 @@ import os from pathlib import Path import logging +from bson.objectid import ObjectId import pyblish.api from avalon import io import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID from openpype import lib from openpype.lib import register_event_callback -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) import openpype.hosts.harmony import openpype.hosts.harmony.api as harmony @@ -109,7 +114,7 @@ def check_inventory(): representation = container['representation'] representation_doc = io.find_one( { - "_id": io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -180,7 
+185,7 @@ def install(): pyblish.api.register_host("harmony") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) @@ -194,7 +199,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) diff --git a/openpype/hosts/harmony/api/workio.py b/openpype/hosts/harmony/api/workio.py index 38a00ae414..ab1cb9b1a9 100644 --- a/openpype/hosts/harmony/api/workio.py +++ b/openpype/hosts/harmony/api/workio.py @@ -2,20 +2,21 @@ import os import shutil +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + from .lib import ( ProcessContext, get_local_harmony_path, zip_and_move, launch_zip_file ) -from avalon import api # used to lock saving until previous save is done. 
save_disabled = False def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["harmony"] + return HOST_WORKFILE_EXTENSIONS["harmony"] def has_unsaved_changes(): diff --git a/openpype/hosts/harmony/plugins/load/load_audio.py b/openpype/hosts/harmony/plugins/load/load_audio.py index 57ea8ae312..e18a6de097 100644 --- a/openpype/hosts/harmony/plugins/load/load_audio.py +++ b/openpype/hosts/harmony/plugins/load/load_audio.py @@ -1,4 +1,7 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony sig = harmony.signature() @@ -29,7 +32,7 @@ function %s(args) """ % (sig, sig) -class ImportAudioLoader(api.Loader): +class ImportAudioLoader(load.LoaderPlugin): """Import audio.""" families = ["shot", "audio"] @@ -37,7 +40,7 @@ class ImportAudioLoader(api.Loader): label = "Import Audio" def load(self, context, name=None, namespace=None, data=None): - wav_file = api.get_representation_path(context["representation"]) + wav_file = get_representation_path(context["representation"]) harmony.send( {"function": func, "args": [context["subset"]["name"], wav_file]} ) diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 686d6b5b7b..9c01fe3cd8 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -1,7 +1,10 @@ import os import json -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib @@ -226,7 +229,7 @@ replace_files """ -class BackgroundLoader(api.Loader): +class BackgroundLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. 
""" @@ -278,7 +281,7 @@ class BackgroundLoader(api.Loader): def update(self, container, representation): - path = api.get_representation_path(representation) + path = get_representation_path(representation) with open(path) as json_file: data = json.load(json_file) @@ -297,7 +300,7 @@ class BackgroundLoader(api.Loader): bg_folder = os.path.dirname(path) - path = api.get_representation_path(representation) + path = get_representation_path(representation) print(container) diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 310f9bdb61..18695438d5 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -6,12 +6,15 @@ from pathlib import Path import clique -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib -class ImageSequenceLoader(api.Loader): +class ImageSequenceLoader(load.LoaderPlugin): """Load image sequences. Stores the imported asset in a container named after the asset. 
@@ -79,7 +82,7 @@ class ImageSequenceLoader(api.Loader): self_name = self.__class__.__name__ node = container.get("nodes").pop() - path = api.get_representation_path(representation) + path = get_representation_path(representation) collections, remainder = clique.assemble( os.listdir(os.path.dirname(path)) ) diff --git a/openpype/hosts/harmony/plugins/load/load_palette.py b/openpype/hosts/harmony/plugins/load/load_palette.py index 2e0f70d135..1da3e61e1b 100644 --- a/openpype/hosts/harmony/plugins/load/load_palette.py +++ b/openpype/hosts/harmony/plugins/load/load_palette.py @@ -1,11 +1,14 @@ import os import shutil -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony -class ImportPaletteLoader(api.Loader): +class ImportPaletteLoader(load.LoaderPlugin): """Import palettes.""" families = ["palette", "harmony.palette"] @@ -31,7 +34,7 @@ class ImportPaletteLoader(api.Loader): scene_path = harmony.send( {"function": "scene.currentProjectPath"} )["result"] - src = api.get_representation_path(representation) + src = get_representation_path(representation) dst = os.path.join( scene_path, "palette-library", diff --git a/openpype/hosts/harmony/plugins/load/load_template.py b/openpype/hosts/harmony/plugins/load/load_template.py index 112e613ae6..c6dc9d913b 100644 --- a/openpype/hosts/harmony/plugins/load/load_template.py +++ b/openpype/hosts/harmony/plugins/load/load_template.py @@ -6,12 +6,15 @@ import os import shutil import uuid -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony import openpype.lib -class TemplateLoader(api.Loader): +class TemplateLoader(load.LoaderPlugin): """Load Harmony template as container. .. todo:: @@ -38,7 +41,7 @@ class TemplateLoader(api.Loader): # Load template. 
self_name = self.__class__.__name__ temp_dir = tempfile.mkdtemp() - zip_file = api.get_representation_path(context["representation"]) + zip_file = get_representation_path(context["representation"]) template_path = os.path.join(temp_dir, "temp.tpl") with zipfile.ZipFile(zip_file, "r") as zip_ref: zip_ref.extractall(template_path) diff --git a/openpype/hosts/harmony/plugins/load/load_template_workfile.py b/openpype/hosts/harmony/plugins/load/load_template_workfile.py index c21b8194b1..2b84a43b35 100644 --- a/openpype/hosts/harmony/plugins/load/load_template_workfile.py +++ b/openpype/hosts/harmony/plugins/load/load_template_workfile.py @@ -3,11 +3,14 @@ import zipfile import os import shutil -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) import openpype.hosts.harmony.api as harmony -class ImportTemplateLoader(api.Loader): +class ImportTemplateLoader(load.LoaderPlugin): """Import templates.""" families = ["harmony.template", "workfile"] @@ -17,7 +20,7 @@ class ImportTemplateLoader(api.Loader): def load(self, context, name=None, namespace=None, data=None): # Import template. 
temp_dir = tempfile.mkdtemp() - zip_file = api.get_representation_path(context["representation"]) + zip_file = get_representation_path(context["representation"]) template_path = os.path.join(temp_dir, "temp.tpl") with zipfile.ZipFile(zip_file, "r") as zip_ref: zip_ref.extractall(template_path) diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index 9439199933..7fab3edfc8 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -1,12 +1,12 @@ import os import hiero.core.events from openpype.api import Logger +from openpype.lib import register_event_callback from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, selection_changed_timeline, before_project_save, - register_event_callback ) from .tags import add_tags_to_workfile from .menu import update_menu_task_label diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index a9467ae5a4..df3b24ff2c 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -8,7 +8,10 @@ import platform import ast import shutil import hiero + from Qt import QtWidgets +from bson.objectid import ObjectId + import avalon.api as avalon import avalon.io from openpype.api import (Logger, Anatomy, get_anatomy_settings) @@ -1006,7 +1009,7 @@ def check_inventory_versions(): # get representation from io representation = io.find_one({ "type": "representation", - "_id": io.ObjectId(container["representation"]) + "_id": ObjectId(container["representation"]) }) # Get start frame from version data diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 5cb23ea355..0d3c8914ce 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -4,12 +4,17 @@ Basic avalon integration import os import contextlib from collections import OrderedDict -from avalon.pipeline import AVALON_CONTAINER_ID + from avalon import api as avalon from avalon import 
schema from pyblish import api as pyblish from openpype.api import Logger -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from openpype.tools.utils import host_tools from . import lib, menu, events @@ -24,7 +29,6 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/") LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/") CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory").replace("\\", "/") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -45,9 +49,8 @@ def install(): log.info("Registering Hiero plug-ins..") pyblish.register_host("hiero") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -67,7 +70,7 @@ def uninstall(): log.info("Deregistering Hiero plug-ins..") pyblish.deregister_host("hiero") pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) # register callback for switching publishable diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 53928aca41..54e66bf99a 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -6,9 +6,9 @@ import hiero from Qt import QtWidgets, QtCore import qargparse -import avalon.api as avalon + import openpype.api as openpype -from openpype.pipeline import LegacyCreator +from openpype.pipeline import 
LoaderPlugin, LegacyCreator from . import lib log = openpype.Logger().get_logger(__name__) @@ -306,7 +306,7 @@ def get_reference_node_parents(ref): return parents -class SequenceLoader(avalon.Loader): +class SequenceLoader(LoaderPlugin): """A basic SequenceLoader for Resolve This will implement the basic behavior for a loader to inherit from that diff --git a/openpype/hosts/hiero/api/workio.py b/openpype/hosts/hiero/api/workio.py index dacb11624f..394cb5e2ab 100644 --- a/openpype/hosts/hiero/api/workio.py +++ b/openpype/hosts/hiero/api/workio.py @@ -1,14 +1,14 @@ import os import hiero -from avalon import api + from openpype.api import Logger +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS - -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["hiero"] + return HOST_WORKFILE_EXTENSIONS["hiero"] def has_unsaved_changes(): diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index b905dd4431..d3908695a2 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -1,4 +1,5 @@ -from avalon import io, api +from avalon import io +from openpype.pipeline import get_representation_path import openpype.hosts.hiero.api as phiero # from openpype.hosts.hiero.api import plugin, lib # reload(lib) @@ -112,7 +113,7 @@ class LoadClip(phiero.SequenceLoader): version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") clip = track_item.source() # reconnect media to new path diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 6cfb713661..d079c9ea81 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ 
b/openpype/hosts/houdini/api/pipeline.py @@ -8,10 +8,13 @@ import hdefereval import pyblish.api import avalon.api -from avalon.pipeline import AVALON_CONTAINER_ID from avalon.lib import find_submodule -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + AVALON_CONTAINER_ID, +) import openpype.hosts.houdini from openpype.hosts.houdini.api import lib @@ -50,7 +53,7 @@ def install(): pyblish.api.register_host("hpython") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info("Installing callbacks ... ") diff --git a/openpype/hosts/houdini/api/workio.py b/openpype/hosts/houdini/api/workio.py index e7310163ea..e0213023fd 100644 --- a/openpype/hosts/houdini/api/workio.py +++ b/openpype/hosts/houdini/api/workio.py @@ -2,11 +2,11 @@ import os import hou -from avalon import api +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["houdini"] + return HOST_WORKFILE_EXTENSIONS["houdini"] def has_unsaved_changes(): diff --git a/openpype/hosts/houdini/plugins/load/actions.py b/openpype/hosts/houdini/plugins/load/actions.py index acdb998c16..63d74c39a5 100644 --- a/openpype/hosts/houdini/plugins/load/actions.py +++ b/openpype/hosts/houdini/plugins/load/actions.py @@ -2,10 +2,10 @@ """ -from avalon import api +from openpype.pipeline import load -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Set Houdini frame range""" families = [ @@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader): hou.playbar.setPlaybackRange(start, end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Set Maya frame range including pre- and post-handles""" families = [ diff --git 
a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py index eaab81f396..0214229d5a 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic.py @@ -1,10 +1,12 @@ import os -from avalon import api - +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class AbcLoader(api.Loader): +class AbcLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["model", "animation", "pointcache", "gpuCache"] @@ -90,7 +92,7 @@ class AbcLoader(api.Loader): return # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") alembic_node.setParms({"fileName": file_path}) diff --git a/openpype/hosts/houdini/plugins/load/load_ass.py b/openpype/hosts/houdini/plugins/load/load_ass.py index 8c272044ec..0144bbaefd 100644 --- a/openpype/hosts/houdini/plugins/load/load_ass.py +++ b/openpype/hosts/houdini/plugins/load/load_ass.py @@ -1,11 +1,15 @@ import os -from avalon import api -from avalon.houdini import pipeline import clique +from openpype.pipeline import ( + load, + get_representation_path, +) + +from openpype.hosts.houdini.api import pipeline -class AssLoader(api.Loader): +class AssLoader(load.LoaderPlugin): """Load .ass with Arnold Procedural""" families = ["ass"] @@ -88,7 +92,7 @@ class AssLoader(api.Loader): def update(self, container, representation): # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") procedural = container["node"] diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py index 8916d3b9b7..ef57d115da 100644 --- 
a/openpype/hosts/houdini/plugins/load/load_camera.py +++ b/openpype/hosts/houdini/plugins/load/load_camera.py @@ -1,4 +1,7 @@ -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline @@ -74,7 +77,7 @@ def transfer_non_default_values(src, dest, ignore=None): dest_parm.setFromParm(parm) -class CameraLoader(api.Loader): +class CameraLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["camera"] @@ -129,7 +132,7 @@ class CameraLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_hda.py b/openpype/hosts/houdini/plugins/load/load_hda.py index f5f2fb7481..2438570c6e 100644 --- a/openpype/hosts/houdini/plugins/load/load_hda.py +++ b/openpype/hosts/houdini/plugins/load/load_hda.py @@ -1,10 +1,13 @@ # -*- coding: utf-8 -*- -from avalon import api - +import os +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class HdaLoader(api.Loader): +class HdaLoader(load.LoaderPlugin): """Load Houdini Digital Asset file.""" families = ["hda"] @@ -15,7 +18,6 @@ class HdaLoader(api.Loader): color = "orange" def load(self, context, name=None, namespace=None, data=None): - import os import hou # Format file name, Houdini only wants forward slashes @@ -49,7 +51,7 @@ class HdaLoader(api.Loader): import hou hda_node = container["node"] - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") hou.hda.installFile(file_path) defs = hda_node.type().allInstalledDefinitions() diff --git a/openpype/hosts/houdini/plugins/load/load_image.py 
b/openpype/hosts/houdini/plugins/load/load_image.py index 39f583677b..671f08f18f 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -1,6 +1,10 @@ import os -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.houdini.api import lib, pipeline import hou @@ -37,7 +41,7 @@ def get_image_avalon_container(): return image_container -class ImageLoader(api.Loader): +class ImageLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["colorbleed.imagesequence"] @@ -70,7 +74,7 @@ class ImageLoader(api.Loader): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), @@ -87,7 +91,7 @@ class ImageLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") file_path = self._get_file_sequence(file_path) diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py index 0d4378b480..48580fc3aa 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -1,8 +1,12 @@ -from avalon import api -from openpype.hosts.houdini.api import lib, pipeline +from openpype.pipeline import ( + load, + get_representation_path, + AVALON_CONTAINER_ID, +) +from openpype.hosts.houdini.api import lib -class USDSublayerLoader(api.Loader): +class USDSublayerLoader(load.LoaderPlugin): """Sublayer USD file in Solaris""" families = [ @@ -40,7 +44,7 @@ class USDSublayerLoader(api.Loader): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - 
"id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), @@ -57,7 +61,7 @@ class USDSublayerLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py index 0edd8d9af6..6851c77e6d 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -1,8 +1,12 @@ -from avalon import api -from openpype.hosts.houdini.api import lib, pipeline +from openpype.pipeline import ( + load, + get_representation_path, + AVALON_CONTAINER_ID, +) +from openpype.hosts.houdini.api import lib -class USDReferenceLoader(api.Loader): +class USDReferenceLoader(load.LoaderPlugin): """Reference USD file in Solaris""" families = [ @@ -40,7 +44,7 @@ class USDReferenceLoader(api.Loader): # Imprint it manually data = { "schema": "avalon-core:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": node_name, "namespace": namespace, "loader": str(self.__class__.__name__), @@ -57,7 +61,7 @@ class USDReferenceLoader(api.Loader): node = container["node"] # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = file_path.replace("\\", "/") # Update attributes diff --git a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py index 40aa7a1d18..06bb9e45e4 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -1,11 +1,14 @@ import os import re -from avalon import api +from openpype.pipeline import ( + load, + 
get_representation_path, +) from openpype.hosts.houdini.api import pipeline -class VdbLoader(api.Loader): +class VdbLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["vdbcache"] @@ -96,7 +99,7 @@ class VdbLoader(api.Loader): return # Update the file path - file_path = api.get_representation_path(representation) + file_path = get_representation_path(representation) file_path = self.format_path(file_path) file_node.setParms({"fileName": file_path}) diff --git a/openpype/hosts/houdini/plugins/load/show_usdview.py b/openpype/hosts/houdini/plugins/load/show_usdview.py index f23974094e..8066615181 100644 --- a/openpype/hosts/houdini/plugins/load/show_usdview.py +++ b/openpype/hosts/houdini/plugins/load/show_usdview.py @@ -1,7 +1,7 @@ -from avalon import api +from openpype.pipeline import load -class ShowInUsdview(api.Loader): +class ShowInUsdview(load.LoaderPlugin): """Open USD file in usdview""" families = ["colorbleed.usd"] diff --git a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py index 645bd05d4b..3e842ae766 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py +++ b/openpype/hosts/houdini/plugins/publish/extract_usd_layered.py @@ -7,6 +7,7 @@ from collections import deque import pyblish.api import openpype.api +from openpype.pipeline import get_representation_path import openpype.hosts.houdini.api.usd as hou_usdlib from openpype.hosts.houdini.api.lib import render_rop @@ -308,7 +309,7 @@ class ExtractUSDLayered(openpype.api.Extractor): self.log.debug("No existing representation..") return False - old_file = api.get_representation_path(representation) + old_file = get_representation_path(representation) if not os.path.exists(old_file): return False diff --git a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py 
index 4071eb3e0c..499b733570 100644 --- a/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py +++ b/openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py @@ -145,7 +145,6 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase): path = self._template.format(**{ "root": root, "project": PROJECT, - "silo": asset_doc["silo"], "asset": asset_doc["name"], "subset": subset, "representation": ext, @@ -165,4 +164,3 @@ output_processor = AvalonURIOutputProcessor() def usdOutputProcessor(): return output_processor - diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 41c67a6209..376c033d46 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -17,10 +17,16 @@ import bson from maya import cmds, mel import maya.api.OpenMaya as om -from avalon import api, io, pipeline +from avalon import api, io from openpype import lib from openpype.api import get_anatomy_settings +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + get_representation_path, + load_container, +) from .commands import reset_frame_range @@ -1580,21 +1586,21 @@ def assign_look_by_version(nodes, version_id): log.info("Using look for the first time ..") # Load file - loaders = api.loaders_from_representation(api.discover(api.Loader), - representation_id) + _loaders = discover_loader_plugins() + loaders = loaders_from_representation(_loaders, representation_id) Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None) if Loader is None: raise RuntimeError("Could not find LookLoader, this is a bug") # Reference the look file with maintained_selection(): - container_node = pipeline.load(Loader, look_representation) + container_node = load_container(Loader, look_representation) # Get container members shader_nodes = get_container_members(container_node) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = 
get_representation_path(json_representation) with open(shader_relation, "r") as f: relationships = json.load(f) @@ -1931,18 +1937,26 @@ def remove_other_uv_sets(mesh): cmds.removeMultiInstance(attr, b=True) -def get_id_from_history(node): +def get_id_from_sibling(node, history_only=True): """Return first node id in the history chain that matches this node. The nodes in history must be of the exact same node type and must be parented under the same parent. + Optionally, if no matching node is found from the history, all the + siblings of the node that are of the same type are checked. + Additionally to having the same parent, the sibling must be marked as + 'intermediate object'. + Args: - node (str): node to retrieve the + node (str): node to retrieve the history from + history_only (bool): if True and if nothing found in history, + look for an 'intermediate object' in all the node's siblings + of same type Returns: - str or None: The id from the node in history or None when no id found - on any valid nodes in the history. + str or None: The id from the sibling node or None when no id found + on any valid nodes in the history or siblings. 
""" @@ -1971,6 +1985,45 @@ def get_id_from_history(node): if _id: return _id + if not history_only: + # Get siblings of same type + similar_nodes = cmds.listRelatives(parent, + type=node_type, + fullPath=True) + similar_nodes = cmds.ls(similar_nodes, exactType=node_type, long=True) + + # Exclude itself + similar_nodes = [x for x in similar_nodes if x != node] + + # Get all unique ids from siblings in order since + # we consistently take the first one found + sibling_ids = OrderedDict() + for similar_node in similar_nodes: + # Check if "intermediate object" + if not cmds.getAttr(similar_node + ".intermediateObject"): + continue + + _id = get_id(similar_node) + if not _id: + continue + + if _id in sibling_ids: + sibling_ids[_id].append(similar_node) + else: + sibling_ids[_id] = [similar_node] + + if sibling_ids: + first_id, found_nodes = next(iter(sibling_ids.items())) + + # Log a warning if we've found multiple unique ids + if len(sibling_ids) > 1: + log.warning(("Found more than 1 intermediate shape with" + " unique id for '{}'. 
Using id of first" + " found: '{}'".format(node, found_nodes[0]))) + + return first_id + + # Project settings def set_scene_fps(fps, update=True): diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index bdb4e5dfa8..bb61128178 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -10,7 +10,6 @@ import pyblish.api import avalon.api from avalon.lib import find_submodule -from avalon.pipeline import AVALON_CONTAINER_ID import openpype.hosts.maya from openpype.tools.utils import host_tools @@ -20,7 +19,14 @@ from openpype.lib import ( emit_event ) from openpype.lib.path_tools import HostDirmap -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + register_inventory_action_path, + deregister_loader_plugin_path, + deregister_inventory_action_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.maya.lib import copy_workspace_mel from . import menu, lib @@ -53,9 +59,9 @@ def install(): pyblish.api.register_host("mayapy") pyblish.api.register_host("maya") - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_inventory_action_path(INVENTORY_PATH) log.info(PUBLISH_PATH) log.info("Installing callbacks ... 
") @@ -182,11 +188,9 @@ def uninstall(): pyblish.api.deregister_host("mayapy") pyblish.api.deregister_host("maya") - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.deregister_plugin_path( - avalon.api.InventoryAction, INVENTORY_PATH - ) + deregister_inventory_action_path(INVENTORY_PATH) menu.uninstall() diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 48d7c465ec..3721868823 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -4,9 +4,12 @@ from maya import cmds import qargparse -from avalon import api -from avalon.pipeline import AVALON_CONTAINER_ID -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, + get_representation_path, + AVALON_CONTAINER_ID, +) from .pipeline import containerise from . import lib @@ -95,7 +98,7 @@ class Creator(LegacyCreator): return instance -class Loader(api.Loader): +class Loader(LoaderPlugin): hosts = ["maya"] @@ -194,7 +197,7 @@ class ReferenceLoader(Loader): node = container["objectName"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Get reference node from container members members = get_container_members(node) diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 1a7c3933a1..0b60564e5e 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -6,9 +6,19 @@ import contextlib import copy import six +from bson.objectid import ObjectId + from maya import cmds -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + update_container, + remove_container, + get_representation_path, +) from openpype.hosts.maya.api.lib import ( matrix_equals, 
unique_namespace @@ -120,12 +130,13 @@ def load_package(filepath, name, namespace=None): root = "{}:{}".format(namespace, name) containers = [] - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for representation_id, instances in data.items(): # Find the compatible loaders - loaders = api.loaders_from_representation(all_loaders, - representation_id) + loaders = loaders_from_representation( + all_loaders, representation_id + ) for instance in instances: container = _add(instance=instance, @@ -180,9 +191,11 @@ def _add(instance, representation_id, loaders, namespace, root="|"): instance['loader'], instance) raise RuntimeError("Loader is missing.") - container = api.load(Loader, - representation_id, - namespace=instance['namespace']) + container = load_container( + Loader, + representation_id, + namespace=instance['namespace'] + ) # Get the root from the loaded container loaded_root = get_container_transforms({"objectName": container}, @@ -271,7 +284,7 @@ def update_package_version(container, version): # Versioning (from `core.maya.pipeline`) current_representation = io.find_one({ - "_id": io.ObjectId(container["representation"]) + "_id": ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" @@ -316,17 +329,17 @@ def update_package(set_container, representation): # Load the original package data current_representation = io.find_one({ - "_id": io.ObjectId(set_container['representation']), + "_id": ObjectId(set_container['representation']), "type": "representation" }) - current_file = api.get_representation_path(current_representation) + current_file = get_representation_path(current_representation) assert current_file.endswith(".json") with open(current_file, "r") as fp: current_data = json.load(fp) # Load the new package data - new_file = api.get_representation_path(representation) + new_file = get_representation_path(representation) assert new_file.endswith(".json") with open(new_file, 
"r") as fp: new_data = json.load(fp) @@ -460,17 +473,17 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # considered as new element and added afterwards. processed_containers.pop() processed_namespaces.remove(container_ns) - api.remove(container) + remove_container(container) continue # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for - # `api.update` to make sense. + # `update_container` to make sense. old = io.find_one({ - "_id": io.ObjectId(representation_current) + "_id": ObjectId(representation_current) }) new = io.find_one({ - "_id": io.ObjectId(representation_new) + "_id": ObjectId(representation_new) }) is_valid = compare_representations(old=old, new=new) if not is_valid: @@ -479,20 +492,21 @@ def update_scene(set_container, containers, current_data, new_data, new_file): continue new_version = new["context"]["version"] - api.update(container, version=new_version) + update_container(container, version=new_version) else: # Remove this container because it's not in the new data log.warning("Removing content: %s", container_ns) - api.remove(container) + remove_container(container) # Add new assets - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() for representation_id, instances in new_data.items(): # Find the compatible loaders - loaders = api.loaders_from_representation(all_loaders, - representation_id) + loaders = loaders_from_representation( + all_loaders, representation_id + ) for instance in instances: # Already processed in update functionality @@ -517,7 +531,7 @@ def update_scene(set_container, containers, current_data, new_data, new_file): def compare_representations(old, new): """Check if the old representation given can be updated - Due to limitations of the `api.update` function we cannot allow + Due to limitations of the `update_container` function we cannot allow differences in the following data: * Representation name 
(extension) diff --git a/openpype/hosts/maya/api/workio.py b/openpype/hosts/maya/api/workio.py index 698c48e81e..fd4961c4bf 100644 --- a/openpype/hosts/maya/api/workio.py +++ b/openpype/hosts/maya/api/workio.py @@ -1,11 +1,12 @@ """Host API required Work Files tool""" import os from maya import cmds -from avalon import api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return api.HOST_WORKFILE_EXTENSIONS["maya"] + return HOST_WORKFILE_EXTENSIONS["maya"] def has_unsaved_changes(): diff --git a/openpype/hosts/maya/plugins/inventory/import_modelrender.py b/openpype/hosts/maya/plugins/inventory/import_modelrender.py index 119edccb7a..d9bb256fac 100644 --- a/openpype/hosts/maya/plugins/inventory/import_modelrender.py +++ b/openpype/hosts/maya/plugins/inventory/import_modelrender.py @@ -1,12 +1,18 @@ import json -from avalon import api, io, pipeline +from avalon import io +from bson.objectid import ObjectId +from openpype.pipeline import ( + InventoryAction, + get_representation_context, + get_representation_path_from_context, +) from openpype.hosts.maya.api.lib import ( maintained_selection, apply_shaders ) -class ImportModelRender(api.InventoryAction): +class ImportModelRender(InventoryAction): label = "Import Model Render Sets" icon = "industry" @@ -35,7 +41,7 @@ class ImportModelRender(api.InventoryAction): nodes.append(n) repr_doc = io.find_one({ - "_id": io.ObjectId(container["representation"]), + "_id": ObjectId(container["representation"]), }) version_id = repr_doc["parent"] @@ -73,11 +79,11 @@ class ImportModelRender(api.InventoryAction): "name": self.look_data_type, }) - context = pipeline.get_representation_context(look_repr["_id"]) - maya_file = pipeline.get_representation_path_from_context(context) + context = get_representation_context(look_repr["_id"]) + maya_file = get_representation_path_from_context(context) - context = pipeline.get_representation_context(json_repr["_id"]) - json_file = 
pipeline.get_representation_path_from_context(context) + context = get_representation_context(json_repr["_id"]) + json_file = get_representation_path_from_context(context) # Import the look file with maintained_selection(): diff --git a/openpype/hosts/maya/plugins/inventory/import_reference.py b/openpype/hosts/maya/plugins/inventory/import_reference.py index 2fa132a867..afb1e0e17f 100644 --- a/openpype/hosts/maya/plugins/inventory/import_reference.py +++ b/openpype/hosts/maya/plugins/inventory/import_reference.py @@ -1,11 +1,10 @@ from maya import cmds -from avalon import api - +from openpype.pipeline import InventoryAction from openpype.hosts.maya.api.plugin import get_reference_node -class ImportReference(api.InventoryAction): +class ImportReference(InventoryAction): """Imports selected reference to inside of the file.""" label = "Import Reference" diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 1cb63c8a7a..483ad32402 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -2,14 +2,14 @@ """ -from avalon import api +from openpype.pipeline import load from openpype.hosts.maya.api.lib import ( maintained_selection, unique_namespace ) -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -43,7 +43,7 @@ class SetFrameRangeLoader(api.Loader): animationEndTime=end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -81,7 +81,7 @@ class SetFrameRangeWithHandlesLoader(api.Loader): animationEndTime=end) -class ImportMayaLoader(api.Loader): +class ImportMayaLoader(load.LoaderPlugin): """Import action for Maya (unmanaged) Warning: diff --git a/openpype/hosts/maya/plugins/load/load_ass.py 
b/openpype/hosts/maya/plugins/load/load_ass.py index 18b34d2233..18de4df3b1 100644 --- a/openpype/hosts/maya/plugins/load/load_ass.py +++ b/openpype/hosts/maya/plugins/load/load_ass.py @@ -1,8 +1,11 @@ import os import clique -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.plugin import get_reference_node from openpype.hosts.maya.api.lib import ( @@ -106,7 +109,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): node = container["objectName"] representation["context"].pop("frame", None) - path = api.get_representation_path(representation) + path = get_representation_path(representation) print(path) # path = self.fname print(self.fname) @@ -164,7 +167,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): type="string") -class AssStandinLoader(api.Loader): +class AssStandinLoader(load.LoaderPlugin): """Load .ASS file as standin""" families = ["ass"] @@ -240,7 +243,7 @@ class AssStandinLoader(api.Loader): import pymel.core as pm - path = api.get_representation_path(representation) + path = get_representation_path(representation) files_in_path = os.listdir(os.path.split(path)[0]) sequence = 0 diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/openpype/hosts/maya/plugins/load/load_assembly.py index 0151da7253..902f38695c 100644 --- a/openpype/hosts/maya/plugins/load/load_assembly.py +++ b/openpype/hosts/maya/plugins/load/load_assembly.py @@ -1,7 +1,10 @@ -from avalon import api +from openpype.pipeline import ( + load, + remove_container +) -class AssemblyLoader(api.Loader): +class AssemblyLoader(load.LoaderPlugin): families = ["assembly"] representations = ["json"] @@ -48,13 +51,11 @@ class AssemblyLoader(api.Loader): def update(self, container, representation): from openpype import setdress - return setdress.update_package(container, - representation) + 
return setdress.update_package(container, representation) def remove(self, container): """Remove all sub containers""" - from avalon import api from openpype import setdress import maya.cmds as cmds @@ -63,7 +64,7 @@ class AssemblyLoader(api.Loader): for member_container in member_containers: self.log.info("Removing container %s", member_container['objectName']) - api.remove(member_container) + remove_container(member_container) # Remove alembic hierarchy reference # TODO: Check whether removing all contained references is safe enough diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index 99f1f7c172..d8844ffea6 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -1,10 +1,14 @@ from maya import cmds, mel -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace -class AudioLoader(api.Loader): +class AudioLoader(load.LoaderPlugin): """Specific loader of audio.""" families = ["audio"] @@ -51,7 +55,7 @@ class AudioLoader(api.Loader): assert audio_node is not None, "Audio node not found." 
- path = api.get_representation_path(representation) + path = get_representation_path(representation) audio_node.filename.set(path) cmds.setAttr( container["objectName"] + ".representation", diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 2e0b7bb810..591e568e4c 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -1,9 +1,13 @@ import os -from avalon import api + +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.api import get_project_settings -class GpuCacheLoader(api.Loader): +class GpuCacheLoader(load.LoaderPlugin): """Load model Alembic as gpuCache""" families = ["model"] @@ -73,7 +77,7 @@ class GpuCacheLoader(api.Loader): import maya.cmds as cmds - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Update the cache members = cmds.sets(container['objectName'], query=True) diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py index 8e33f51389..b250986489 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -1,6 +1,10 @@ from Qt import QtWidgets, QtCore -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.pipeline import containerise from openpype.hosts.maya.api.lib import unique_namespace @@ -74,7 +78,7 @@ class CameraWindow(QtWidgets.QDialog): self.close() -class ImagePlaneLoader(api.Loader): +class ImagePlaneLoader(load.LoaderPlugin): """Specific loader of plate for image planes on selected camera.""" families = ["image", "plate", "render"] @@ -203,7 +207,7 @@ class ImagePlaneLoader(api.Loader): assert image_plane_shape is not None, "Image plane not found." 
- path = api.get_representation_path(representation) + path = get_representation_path(representation) image_plane_shape.imageName.set(path) cmds.setAttr( container["objectName"] + ".representation", diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index 96c1ecbb20..8f02ed59b8 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -5,7 +5,8 @@ from collections import defaultdict from Qt import QtWidgets -from avalon import api, io +from avalon import io +from openpype.pipeline import get_representation_path import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib from openpype.widgets.message_window import ScrollMessageBox @@ -77,7 +78,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): }) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = get_representation_path(json_representation) with open(shader_relation, "r") as f: json_data = json.load(f) diff --git a/openpype/hosts/maya/plugins/load/load_matchmove.py b/openpype/hosts/maya/plugins/load/load_matchmove.py index abc702cde8..ee3332bd09 100644 --- a/openpype/hosts/maya/plugins/load/load_matchmove.py +++ b/openpype/hosts/maya/plugins/load/load_matchmove.py @@ -1,8 +1,8 @@ -from avalon import api from maya import mel +from openpype.pipeline import load -class MatchmoveLoader(api.Loader): +class MatchmoveLoader(load.LoaderPlugin): """ This will run matchmove script to create track in scene. 
diff --git a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py index fd2ae0f1d3..d93a9f02a2 100644 --- a/openpype/hosts/maya/plugins/load/load_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/load/load_redshift_proxy.py @@ -5,8 +5,11 @@ import clique import maya.cmds as cmds -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( namespaced, maintained_selection, @@ -15,7 +18,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class RedshiftProxyLoader(api.Loader): +class RedshiftProxyLoader(load.LoaderPlugin): """Load Redshift proxy""" families = ["redshiftproxy"] @@ -78,7 +81,7 @@ class RedshiftProxyLoader(api.Loader): rs_meshes = cmds.ls(members, type="RedshiftProxyMesh") assert rs_meshes, "Cannot find RedshiftProxyMesh in container" - filename = api.get_representation_path(representation) + filename = get_representation_path(representation) for rs_mesh in rs_meshes: cmds.setAttr("{}.fileName".format(rs_mesh), diff --git a/openpype/hosts/maya/plugins/load/load_rendersetup.py b/openpype/hosts/maya/plugins/load/load_rendersetup.py index efeff2f193..7a2d8b1002 100644 --- a/openpype/hosts/maya/plugins/load/load_rendersetup.py +++ b/openpype/hosts/maya/plugins/load/load_rendersetup.py @@ -7,10 +7,13 @@ instance. 
""" import json -import six import sys +import six -from avalon import api +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api import lib from openpype.hosts.maya.api.pipeline import containerise @@ -18,7 +21,7 @@ from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup -class RenderSetupLoader(api.Loader): +class RenderSetupLoader(load.LoaderPlugin): """Load json preset for RenderSetup overwriting current one.""" families = ["rendersetup"] @@ -87,7 +90,7 @@ class RenderSetupLoader(api.Loader): "Render setup setting will be overwritten by new version. All " "setting specified by user not included in loaded version " "will be lost.") - path = api.get_representation_path(representation) + path = get_representation_path(representation) with open(path, "r") as file: try: renderSetup.instance().decode( diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py index 3e1d67ae9a..70bd9d22e2 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_redshift.py @@ -1,9 +1,10 @@ import os -from avalon import api + from openpype.api import get_project_settings +from openpype.pipeline import load -class LoadVDBtoRedShift(api.Loader): +class LoadVDBtoRedShift(load.LoaderPlugin): """Load OpenVDB in a Redshift Volume Shape""" families = ["vdbcache"] diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py index 6d5544103d..4f14235bfb 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -1,6 +1,10 @@ import os -from avalon import api + from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from maya import cmds @@ -69,7 +73,7 @@ def _fix_duplicate_vvg_callbacks(): 
matched.add(callback) -class LoadVDBtoVRay(api.Loader): +class LoadVDBtoVRay(load.LoaderPlugin): families = ["vdbcache"] representations = ["vdb"] @@ -252,7 +256,7 @@ class LoadVDBtoVRay(api.Loader): def update(self, container, representation): - path = api.get_representation_path(representation) + path = get_representation_path(representation) # Find VRayVolumeGrid members = cmds.sets(container['objectName'], query=True) diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index ac2fe635b3..69d54df62b 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -7,10 +7,16 @@ loader will use them instead of native vray vrmesh format. """ import os +from bson.objectid import ObjectId + import maya.cmds as cmds -from avalon import api, io +from avalon import io from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( maintained_selection, namespaced, @@ -19,7 +25,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class VRayProxyLoader(api.Loader): +class VRayProxyLoader(load.LoaderPlugin): """Load VRay Proxy with Alembic or VrayMesh.""" families = ["vrayproxy", "model", "pointcache", "animation"] @@ -100,7 +106,10 @@ class VRayProxyLoader(api.Loader): assert vraymeshes, "Cannot find VRayMesh in container" # get all representations for this version - filename = self._get_abc(representation["parent"]) or api.get_representation_path(representation) # noqa: E501 + filename = ( + self._get_abc(representation["parent"]) + or get_representation_path(representation) + ) for vray_mesh in vraymeshes: cmds.setAttr("{}.fileName".format(vray_mesh), @@ -179,13 +188,13 @@ class VRayProxyLoader(api.Loader): abc_rep = io.find_one( { "type": "representation", - "parent": io.ObjectId(version_id), + 
"parent": ObjectId(version_id), "name": "abc" }) if abc_rep: self.log.debug("Found, we'll link alembic to vray proxy.") - file_name = api.get_representation_path(abc_rep) + file_name = get_representation_path(abc_rep) self.log.debug("File: {}".format(self.fname)) return file_name diff --git a/openpype/hosts/maya/plugins/load/load_vrayscene.py b/openpype/hosts/maya/plugins/load/load_vrayscene.py index dfe2b85edc..61132088cc 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayscene.py +++ b/openpype/hosts/maya/plugins/load/load_vrayscene.py @@ -1,8 +1,11 @@ # -*- coding: utf-8 -*- import os import maya.cmds as cmds # noqa -from avalon import api from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api.lib import ( maintained_selection, namespaced, @@ -11,7 +14,7 @@ from openpype.hosts.maya.api.lib import ( from openpype.hosts.maya.api.pipeline import containerise -class VRaySceneLoader(api.Loader): +class VRaySceneLoader(load.LoaderPlugin): """Load Vray scene""" families = ["vrayscene_layer"] @@ -78,7 +81,7 @@ class VRaySceneLoader(api.Loader): vraymeshes = cmds.ls(members, type="VRayScene") assert vraymeshes, "Cannot find VRayScene in container" - filename = api.get_representation_path(representation) + filename = get_representation_path(representation) for vray_mesh in vraymeshes: cmds.setAttr("{}.FilePath".format(vray_mesh), diff --git a/openpype/hosts/maya/plugins/load/load_yeti_cache.py b/openpype/hosts/maya/plugins/load/load_yeti_cache.py index dfe75173ac..c64e1c540b 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_cache.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_cache.py @@ -7,13 +7,17 @@ from pprint import pprint from maya import cmds -from avalon import api, io +from avalon import io from openpype.api import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) from openpype.hosts.maya.api import lib from 
openpype.hosts.maya.api.pipeline import containerise -class YetiCacheLoader(api.Loader): +class YetiCacheLoader(load.LoaderPlugin): families = ["yeticache", "yetiRig"] representations = ["fur"] @@ -121,8 +125,8 @@ class YetiCacheLoader(api.Loader): "cannot find fursettings representation" ) - settings_fname = api.get_representation_path(fur_settings) - path = api.get_representation_path(representation) + settings_fname = get_representation_path(fur_settings) + path = get_representation_path(representation) # Get all node data with open(settings_fname, "r") as fp: settings = json.load(fp) diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 389995d30c..3a47cdadb5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -6,7 +6,7 @@ from maya import cmds import openpype.api from openpype.hosts.maya.api.lib import maintained_selection -from avalon.pipeline import AVALON_CONTAINER_ID +from openpype.pipeline import AVALON_CONTAINER_ID class ExtractMayaSceneRaw(openpype.api.Extractor): diff --git a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py index 00f0d38775..05d63f1d56 100644 --- a/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_animation_out_set_related_node_ids.py @@ -32,8 +32,8 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): # if a deformer has been created on the shape invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found with non-related " - "asset IDs: {0}".format(invalid)) + raise RuntimeError("Nodes found with mismatching " + "IDs: {0}".format(invalid)) @classmethod def get_invalid(cls, instance): @@ -65,7 +65,7 @@ class 
ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): invalid.append(node) continue - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if history_id is not None and node_id != history_id: invalid.append(node) @@ -76,7 +76,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): for node in cls.get_invalid(instance): # Get the original id from history - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if not history_id: cls.log.error("Could not find ID in history for '%s'", node) continue diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py index a4d4d2bcc2..0324be9fc9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_deformed_shapes.py @@ -48,7 +48,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): invalid = [] for shape in shapes: - history_id = lib.get_id_from_history(shape) + history_id = lib.get_id_from_sibling(shape) if history_id: current_id = lib.get_id(shape) if current_id != history_id: @@ -61,7 +61,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): for node in cls.get_invalid(instance): # Get the original id from history - history_id = lib.get_id_from_history(node) + history_id = lib.get_id_from_sibling(node) if not history_id: cls.log.error("Could not find ID in history for '%s'", node) continue diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py index e2090080f6..cc3723a6e1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_out_set_node_ids.py @@ -24,6 +24,7 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): 
openpype.hosts.maya.api.action.SelectInvalidAction, openpype.api.RepairAction ] + allow_history_only = False def process(self, instance): """Process all meshes""" @@ -32,8 +33,8 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): # if a deformer has been created on the shape invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found with non-related " - "asset IDs: {0}".format(invalid)) + raise RuntimeError("Nodes found with mismatching " + "IDs: {0}".format(invalid)) @classmethod def get_invalid(cls, instance): @@ -51,10 +52,13 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): noIntermediate=True) for shape in shapes: - history_id = lib.get_id_from_history(shape) - if history_id: + sibling_id = lib.get_id_from_sibling( + shape, + history_only=cls.allow_history_only + ) + if sibling_id: current_id = lib.get_id(shape) - if current_id != history_id: + if current_id != sibling_id: invalid.append(shape) return invalid @@ -63,10 +67,13 @@ class ValidateRigOutSetNodeIds(pyblish.api.InstancePlugin): def repair(cls, instance): for node in cls.get_invalid(instance): - # Get the original id from history - history_id = lib.get_id_from_history(node) - if not history_id: - cls.log.error("Could not find ID in history for '%s'", node) + # Get the original id from sibling + sibling_id = lib.get_id_from_sibling( + node, + history_only=cls.allow_history_only + ) + if not sibling_id: + cls.log.error("Could not find ID in siblings for '%s'", node) continue - lib.set_id(node, history_id, overwrite=True) + lib.set_id(node, sibling_id, overwrite=True) diff --git a/openpype/hosts/nuke/api/command.py b/openpype/hosts/nuke/api/command.py index 212d4757c6..6f74c08e97 100644 --- a/openpype/hosts/nuke/api/command.py +++ b/openpype/hosts/nuke/api/command.py @@ -1,6 +1,7 @@ import logging import contextlib import nuke +from bson.objectid import ObjectId from avalon import api, io @@ -70,10 +71,10 @@ def get_handles(asset): if "visualParent" in 
data: vp = data["visualParent"] if vp is not None: - parent_asset = io.find_one({"_id": io.ObjectId(vp)}) + parent_asset = io.find_one({"_id": ObjectId(vp)}) if parent_asset is None: - parent_asset = io.find_one({"_id": io.ObjectId(asset["parent"])}) + parent_asset = io.find_one({"_id": ObjectId(asset["parent"])}) if parent_asset is not None: return get_handles(parent_asset) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index dba7ec1b85..3c8ba3e77c 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -6,10 +6,11 @@ import contextlib from collections import OrderedDict import clique +from bson.objectid import ObjectId import nuke -from avalon import api, io, lib +from avalon import api, io from openpype.api import ( Logger, @@ -20,7 +21,6 @@ from openpype.api import ( get_workdir_data, get_asset, get_current_project_settings, - ApplicationManager ) from openpype.tools.utils import host_tools from openpype.lib.path_tools import HostDirmap @@ -570,7 +570,7 @@ def check_inventory_versions(): # get representation from io representation = io.find_one({ "type": "representation", - "_id": io.ObjectId(avalon_knob_data["representation"]) + "_id": ObjectId(avalon_knob_data["representation"]) }) # Failsafe for not finding the representation. 
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 348c9680b2..1d110cb94a 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -6,7 +6,6 @@ import nuke import pyblish.api import avalon.api -from avalon import pipeline import openpype from openpype.api import ( @@ -15,7 +14,14 @@ from openpype.api import ( get_current_project_settings ) from openpype.lib import register_event_callback -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + register_inventory_action_path, + deregister_loader_plugin_path, + deregister_inventory_action_path, + AVALON_CONTAINER_ID, +) from openpype.tools.utils import host_tools from .command import viewer_update_and_undo_stop @@ -99,9 +105,9 @@ def install(): log.info("Registering Nuke plug-ins..") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + register_inventory_action_path(INVENTORY_PATH) # Register Avalon event for workfiles loading. 
register_event_callback("workio.open_file", check_inventory_versions) @@ -125,8 +131,9 @@ def uninstall(): log.info("Deregistering Nuke plug-ins..") pyblish.deregister_host("nuke") pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_inventory_action_path(INVENTORY_PATH) pyblish.api.deregister_callback( "instanceToggled", on_pyblish_instance_toggled) @@ -326,7 +333,7 @@ def containerise(node, data = OrderedDict( [ ("schema", "openpype:container-2.0"), - ("id", pipeline.AVALON_CONTAINER_ID), + ("id", AVALON_CONTAINER_ID), ("name", name), ("namespace", namespace), ("loader", str(loader)), diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index ff186cd685..d0bb45a05d 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -4,10 +4,11 @@ import string import nuke -import avalon.api - from openpype.api import get_current_project_settings -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from .lib import ( Knobby, check_subsetname_exists, @@ -85,7 +86,7 @@ def get_review_presets_config(): return [str(name) for name, _prop in outputs.items()] -class NukeLoader(avalon.api.Loader): +class NukeLoader(LoaderPlugin): container_id_knob = "containerId" container_id = None diff --git a/openpype/hosts/nuke/api/workio.py b/openpype/hosts/nuke/api/workio.py index dbc24fdc9b..68fcb0927f 100644 --- a/openpype/hosts/nuke/api/workio.py +++ b/openpype/hosts/nuke/api/workio.py @@ -1,11 +1,12 @@ """Host API required Work Files tool""" import os import nuke -import avalon.api + +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS def file_extensions(): - return avalon.api.HOST_WORKFILE_EXTENSIONS["nuke"] + return HOST_WORKFILE_EXTENSIONS["nuke"] def has_unsaved_changes(): diff --git 
a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py index 5f834be557..c04c939a8d 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,9 +1,9 @@ -from avalon import api from openpype.api import Logger +from openpype.pipeline import InventoryAction from openpype.hosts.nuke.api.lib import set_avalon_knob_data -class RepairOldLoaders(api.InventoryAction): +class RepairOldLoaders(InventoryAction): label = "Repair Old Loaders" icon = "gears" diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py index 3f174b3562..d7d5f00b87 100644 --- a/openpype/hosts/nuke/plugins/inventory/select_containers.py +++ b/openpype/hosts/nuke/plugins/inventory/select_containers.py @@ -1,8 +1,8 @@ -from avalon import api +from openpype.pipeline import InventoryAction from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop -class SelectContainers(api.InventoryAction): +class SelectContainers(InventoryAction): label = "Select Containers" icon = "mouse-pointer" diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index 07dcf2d8e1..81840b3a38 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -2,13 +2,13 @@ """ -from avalon import api from openpype.api import Logger +from openpype.pipeline import load log = Logger().get_logger(__name__) -class SetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(load.LoaderPlugin): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -42,7 +42,7 @@ class SetFrameRangeLoader(api.Loader): lib.update_frame_range(start, end) -class SetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): """Specific loader of Alembic 
for the avalon.animation family""" families = ["animation", diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 58ebcc7d49..36cec6f4c5 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,7 +1,11 @@ -from avalon import api, io +from avalon import io import nuke import nukescripts +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( find_free_space_to_paste_nodes, maintained_selection, @@ -14,7 +18,7 @@ from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop from openpype.hosts.nuke.api import containerise, update_container -class LoadBackdropNodes(api.Loader): +class LoadBackdropNodes(load.LoaderPlugin): """Loading Published Backdrop nodes (workfile, nukenodes)""" representations = ["nk"] @@ -191,7 +195,7 @@ class LoadBackdropNodes(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") context = representation["context"] name = container['name'] version_data = version.get("data", {}) diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index 9610940619..fb5f7f8ede 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,6 +1,10 @@ import nuke -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import ( containerise, update_container, @@ -11,7 +15,7 @@ from openpype.hosts.nuke.api.lib import ( ) -class AlembicCameraLoader(api.Loader): +class AlembicCameraLoader(load.LoaderPlugin): """ This will load alembic camera into script. 
""" @@ -127,7 +131,7 @@ class AlembicCameraLoader(api.Loader): data_imprint.update({k: version_data[k]}) # getting file path - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") with maintained_selection(): camera_node = nuke.toNode(object_name) diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index a253ba4a9d..9b0588feac 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -1,7 +1,8 @@ import nuke import qargparse -from avalon import api, io +from avalon import io +from openpype.pipeline import get_representation_path from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace, maintained_selection @@ -41,6 +42,9 @@ class LoadClip(plugin.NukeLoader): icon = "file-video-o" color = "white" + # Loaded from settings + _representations = [] + script_start = int(nuke.root()["first_frame"].value()) # option gui @@ -97,7 +101,7 @@ class LoadClip(plugin.NukeLoader): last += self.handle_end if not is_sequence: - duration = last - first + 1 + duration = last - first first = 1 last = first + duration elif "#" not in file: @@ -186,7 +190,7 @@ class LoadClip(plugin.NukeLoader): is_sequence = len(representation["files"]) > 1 read_node = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") start_at_workfile = bool("start at" in read_node['frame_mode'].value()) @@ -212,7 +216,7 @@ class LoadClip(plugin.NukeLoader): last += self.handle_end if not is_sequence: - duration = last - first + 1 + duration = last - first first = 1 last = first + duration elif "#" not in file: diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 4d83da1a78..68c3952942 100644 --- 
a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -1,8 +1,13 @@ import json from collections import OrderedDict import nuke -from avalon import api, io +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import ( containerise, update_container, @@ -10,7 +15,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadEffects(api.Loader): +class LoadEffects(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["effectJson"] @@ -150,7 +155,7 @@ class LoadEffects(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 4d30e0f93c..9c4fd4c2c6 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -3,8 +3,12 @@ from collections import OrderedDict import nuke -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api import lib from openpype.hosts.nuke.api import ( containerise, @@ -13,7 +17,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadEffectsInputProcess(api.Loader): +class LoadEffectsInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["effectJson"] @@ -157,7 +161,7 @@ class LoadEffectsInputProcess(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = 
get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 9c726d8fe6..6f2b191be9 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,6 +1,11 @@ import nuke -from avalon import api, io +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( maintained_selection, get_avalon_knob_data, @@ -13,7 +18,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadGizmo(api.Loader): +class LoadGizmo(load.LoaderPlugin): """Loading nuke Gizmo""" representations = ["gizmo"] @@ -104,7 +109,7 @@ class LoadGizmo(api.Loader): # get corresponding node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 78d2625758..87bebce15b 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,6 +1,11 @@ -from avalon import api, io import nuke +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( maintained_selection, create_backdrop, @@ -14,7 +19,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadGizmoInputProcess(api.Loader): +class LoadGizmoInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" representations = ["gizmo"] @@ -110,7 +115,7 @@ class LoadGizmoInputProcess(api.Loader): # get corresponding 
node GN = nuke.toNode(container['objectName']) - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") name = container['name'] version_data = version.get("data", {}) vname = version.get("name", None) diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 27c634ec57..9a175a0cba 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -1,8 +1,12 @@ import nuke import qargparse -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace ) @@ -13,7 +17,7 @@ from openpype.hosts.nuke.api import ( ) -class LoadImage(api.Loader): +class LoadImage(load.LoaderPlugin): """Load still image into Nuke""" families = [ @@ -32,6 +36,9 @@ class LoadImage(api.Loader): icon = "image" color = "white" + # Loaded from settings + _representations = [] + node_name_template = "{class_name}_{ext}" options = [ @@ -161,7 +168,7 @@ class LoadImage(api.Loader): repr_cont = representation["context"] - file = api.get_representation_path(representation) + file = get_representation_path(representation) if not file: repr_id = representation["_id"] diff --git a/openpype/hosts/nuke/plugins/load/load_matchmove.py b/openpype/hosts/nuke/plugins/load/load_matchmove.py index 60d5dc026f..f5a90706c7 100644 --- a/openpype/hosts/nuke/plugins/load/load_matchmove.py +++ b/openpype/hosts/nuke/plugins/load/load_matchmove.py @@ -1,8 +1,8 @@ -from avalon import api import nuke +from openpype.pipeline import load -class MatchmoveLoader(api.Loader): +class MatchmoveLoader(load.LoaderPlugin): """ This will run matchmove script to create track in script. 
""" diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index 2b52bbf00f..e445beca05 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,5 +1,9 @@ import nuke -from avalon import api, io +from avalon import io +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import maintained_selection from openpype.hosts.nuke.api import ( containerise, @@ -8,7 +12,7 @@ from openpype.hosts.nuke.api import ( ) -class AlembicModelLoader(api.Loader): +class AlembicModelLoader(load.LoaderPlugin): """ This will load alembic model into script. """ @@ -124,7 +128,7 @@ class AlembicModelLoader(api.Loader): data_imprint.update({k: version_data[k]}) # getting file path - file = api.get_representation_path(representation).replace("\\", "/") + file = get_representation_path(representation).replace("\\", "/") with maintained_selection(): model_node = nuke.toNode(object_name) diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 48bf0b889f..779f101682 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,6 +1,11 @@ import nuke -from avalon import api, io +from avalon import io + +from openpype.pipeline import ( + load, + get_representation_path, +) from openpype.hosts.nuke.api.lib import get_avalon_knob_data from openpype.hosts.nuke.api import ( containerise, @@ -9,7 +14,7 @@ from openpype.hosts.nuke.api import ( ) -class LinkAsGroup(api.Loader): +class LinkAsGroup(load.LoaderPlugin): """Copy the published file to be pasted at the desired location""" representations = ["nk"] @@ -109,7 +114,7 @@ class LinkAsGroup(api.Loader): """ node = nuke.toNode(container['objectName']) - root = api.get_representation_path(representation).replace("\\", "/") + root = 
get_representation_path(representation).replace("\\", "/") # Get start frame from version data version = io.find_one({ diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py index 189f28f7c6..85e98db7ed 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py @@ -3,8 +3,9 @@ import re from pprint import pformat import nuke import pyblish.api +from avalon import io import openpype.api as pype -from avalon import io, api +from openpype.pipeline import get_representation_path @pyblish.api.log @@ -182,7 +183,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if repre_doc: instance.data["audio"] = [{ "offset": 0, - "filename": api.get_representation_path(repre_doc) + "filename": get_representation_path(repre_doc) }] self.log.debug("instance.data: {}".format(pformat(instance.data))) diff --git a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py index 22a9b3678e..2bf1ff81f8 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py @@ -1,12 +1,16 @@ import os -import toml import nuke +import toml import pyblish.api -from avalon import api from bson.objectid import ObjectId +from openpype.pipeline import ( + discover_loader_plugins, + load_container, +) + class RepairReadLegacyAction(pyblish.api.Action): @@ -49,13 +53,13 @@ class RepairReadLegacyAction(pyblish.api.Action): loader_name = "LoadMov" loader_plugin = None - for Loader in api.discover(api.Loader): + for Loader in discover_loader_plugins(): if Loader.__name__ != loader_name: continue loader_plugin = Loader - api.load( + load_container( Loader=loader_plugin, representation=ObjectId(data["representation"]) ) diff --git a/openpype/hosts/photoshop/api/README.md b/openpype/hosts/photoshop/api/README.md index 
b958f53803..80792a4da0 100644 --- a/openpype/hosts/photoshop/api/README.md +++ b/openpype/hosts/photoshop/api/README.md @@ -195,11 +195,12 @@ class ExtractImage(openpype.api.Extractor): #### Loader Plugin ```python from avalon import api, photoshop +from openpype.pipeline import load, get_representation_path stub = photoshop.stub() -class ImageLoader(api.Loader): +class ImageLoader(load.LoaderPlugin): """Load images Stores the imported asset in a container named after the asset. @@ -227,7 +228,7 @@ class ImageLoader(api.Loader): with photoshop.maintained_selection(): stub.replace_smart_object( - layer, api.get_representation_path(representation) + layer, get_representation_path(representation) ) stub.imprint( @@ -245,7 +246,7 @@ https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/ Add --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome then localhost:8078 (port set in `photoshop\extension\.debug`) -Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 +Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01 Or install CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x ## Resources diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index 1be8129aa1..c2ad0ac7b0 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,14 +1,19 @@ import os from Qt import QtWidgets +from bson.objectid import ObjectId import pyblish.api import avalon.api -from avalon import pipeline, io +from avalon import io from openpype.api import Logger - from openpype.lib import register_event_callback -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) 
import openpype.hosts.photoshop from . import lib @@ -33,7 +38,7 @@ def check_inventory(): representation = container['representation'] representation_doc = io.find_one( { - "_id": io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -69,7 +74,7 @@ def install(): pyblish.api.register_host("photoshop") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) @@ -82,7 +87,7 @@ def install(): def uninstall(): pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) @@ -218,7 +223,7 @@ def containerise( data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "name": name, "namespace": namespace, "loader": str(loader), diff --git a/openpype/hosts/photoshop/api/plugin.py b/openpype/hosts/photoshop/api/plugin.py index c577c67d82..c80e6bbd06 100644 --- a/openpype/hosts/photoshop/api/plugin.py +++ b/openpype/hosts/photoshop/api/plugin.py @@ -1,6 +1,6 @@ import re -import avalon.api +from openpype.pipeline import LoaderPlugin from .launch_logic import stub @@ -29,7 +29,7 @@ def get_unique_layer_name(layers, asset_name, subset_name): return "{}_{:0>3d}".format(name, occurrences + 1) -class PhotoshopLoader(avalon.api.Loader): +class PhotoshopLoader(LoaderPlugin): @staticmethod def get_stub(): return stub() diff --git a/openpype/hosts/photoshop/api/workio.py b/openpype/hosts/photoshop/api/workio.py index 0bf3ed2bd9..951c5dbfff 100644 --- a/openpype/hosts/photoshop/api/workio.py +++ b/openpype/hosts/photoshop/api/workio.py @@ -1,8 +1,7 @@ """Host API required Work Files tool""" import os -import avalon.api - +from 
openpype.pipeline import HOST_WORKFILE_EXTENSIONS from . import lib @@ -15,7 +14,7 @@ def _active_document(): def file_extensions(): - return avalon.api.HOST_WORKFILE_EXTENSIONS["photoshop"] + return HOST_WORKFILE_EXTENSIONS["photoshop"] def has_unsaved_changes(): diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index 3b1cfe9636..0a9421b8f2 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -1,6 +1,6 @@ import re -from avalon import api +from openpype.pipeline import get_representation_path from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -54,7 +54,7 @@ class ImageLoader(photoshop.PhotoshopLoader): else: # switching version - keep same name layer_name = container["namespace"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) with photoshop.maintained_selection(): stub.replace_smart_object( layer, path, layer_name diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py index 12e0503dfc..5f39121ae1 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -1,8 +1,8 @@ import os import qargparse -from avalon.pipeline import get_representation_path_from_context +from openpype.pipeline import get_representation_path_from_context from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py index 60142d4a1f..f5f0545d39 100644 --- a/openpype/hosts/photoshop/plugins/load/load_reference.py +++ 
b/openpype/hosts/photoshop/plugins/load/load_reference.py @@ -1,7 +1,6 @@ import re -from avalon import api - +from openpype.pipeline import get_representation_path from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -55,7 +54,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): else: # switching version - keep same name layer_name = container["namespace"] - path = api.get_representation_path(representation) + path = get_representation_path(representation) with photoshop.maintained_selection(): stub.replace_smart_object( layer, path, layer_name diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index c82545268b..e8b017ead5 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -6,10 +6,14 @@ import contextlib from collections import OrderedDict from avalon import api as avalon from avalon import schema -from avalon.pipeline import AVALON_CONTAINER_ID from pyblish import api as pyblish from openpype.api import Logger -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from . import lib from . 
import PLUGINS_DIR from openpype.tools.utils import host_tools @@ -18,7 +22,6 @@ log = Logger().get_logger(__name__) PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -42,9 +45,8 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) log.info("Registering DaVinci Resovle plug-ins..") - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.register_plugin_path(LegacyCreator, CREATE_PATH) - avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -67,9 +69,8 @@ def uninstall(): pyblish.deregister_plugin_path(PUBLISH_PATH) log.info("Deregistering DaVinci Resovle plug-ins..") - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) - avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index e7793d6e95..8e1436021c 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -4,14 +4,15 @@ import uuid import qargparse from Qt import QtWidgets, QtCore -from avalon import api import openpype.api as pype -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from openpype.hosts import resolve from . 
import lib - class CreatorWidget(QtWidgets.QDialog): # output items @@ -292,7 +293,7 @@ class ClipLoader: """ Initialize object Arguments: - cls (avalon.api.Loader): plugin object + cls (openpype.pipeline.load.LoaderPlugin): plugin object context (dict): loader plugin context options (dict)[optional]: possible keys: projectBinPath: "path/to/binItem" @@ -448,7 +449,7 @@ class ClipLoader: return timeline_item -class TimelineItemLoader(api.Loader): +class TimelineItemLoader(LoaderPlugin): """A basic SequenceLoader for Resolve This will implement the basic behavior for a loader to inherit from that diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index e20384ee6c..71850d95f6 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,11 +1,14 @@ -from avalon import io, api -from openpype.hosts import resolve from copy import deepcopy from importlib import reload + +from avalon import io +from openpype.hosts import resolve +from openpype.pipeline import get_representation_path from openpype.hosts.resolve.api import lib, plugin reload(plugin) reload(lib) + class LoadClip(resolve.TimelineItemLoader): """Load a subset to timeline as clip @@ -99,7 +102,7 @@ class LoadClip(resolve.TimelineItemLoader): version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - self.fname = api.get_representation_path(representation) + self.fname = get_representation_path(representation) context["version"] = {"data": version_data} loader = resolve.ClipLoader(self, context) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py index ea0b6cdf41..c1c48ec72d 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py +++ 
b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py @@ -3,9 +3,10 @@ import re import pyblish.api import json -from avalon.api import format_template_with_optional_keys - -from openpype.lib import prepare_template_data +from openpype.lib import ( + prepare_template_data, + StringTemplate, +) class CollectTextures(pyblish.api.ContextPlugin): @@ -110,8 +111,9 @@ class CollectTextures(pyblish.api.ContextPlugin): formatting_data.update(explicit_data) fill_pairs = prepare_template_data(formatting_data) - workfile_subset = format_template_with_optional_keys( - fill_pairs, self.workfile_subset_template) + workfile_subset = StringTemplate.format_strict_template( + self.workfile_subset_template, fill_pairs + ) asset_build = self._get_asset_build( repre_file, @@ -201,8 +203,9 @@ class CollectTextures(pyblish.api.ContextPlugin): formatting_data.update(explicit_data) fill_pairs = prepare_template_data(formatting_data) - subset = format_template_with_optional_keys( - fill_pairs, self.texture_subset_template) + subset = StringTemplate.format_strict_template( + self.texture_subset_template, fill_pairs + ) asset_build = self._get_asset_build( repre_file, diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py index c18de5bc1c..f327895b83 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py @@ -2,6 +2,9 @@ import os import pyblish.api import openpype.api +from openpype.lib import ( + get_ffmpeg_tool_path, +) from pprint import pformat @@ -27,7 +30,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): instance.data["representations"] = list() # get ffmpet path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # get staging dir staging_dir = self.staging_dir(instance) 
@@ -44,7 +47,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) # # check video file metadata - # input_data = plib.ffprobe_streams(video_file_path)[0] + # input_data = plib.get_ffprobe_streams(video_file_path)[0] # self.log.debug(f"__ input_data: `{input_data}`") start = float(instance.data["clipInH"]) diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/testhost/plugins/create/auto_creator.py index 45c573e487..d5935602a0 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/testhost/plugins/create/auto_creator.py @@ -1,10 +1,10 @@ +from avalon import io +from openpype.lib import NumberDef from openpype.hosts.testhost.api import pipeline from openpype.pipeline import ( AutoCreator, CreatedInstance, - lib ) -from avalon import io class MyAutoCreator(AutoCreator): @@ -13,7 +13,7 @@ class MyAutoCreator(AutoCreator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key", label="Number") + NumberDef("number_key", label="Number") ] return output diff --git a/openpype/hosts/testhost/plugins/create/test_creator_1.py b/openpype/hosts/testhost/plugins/create/test_creator_1.py index 45c30e8a27..7664276fa2 100644 --- a/openpype/hosts/testhost/plugins/create/test_creator_1.py +++ b/openpype/hosts/testhost/plugins/create/test_creator_1.py @@ -1,10 +1,16 @@ import json from openpype import resources from openpype.hosts.testhost.api import pipeline +from openpype.lib import ( + UISeparatorDef, + UILabelDef, + BoolDef, + NumberDef, + FileDef, +) from openpype.pipeline import ( Creator, CreatedInstance, - lib ) @@ -54,17 +60,17 @@ class TestCreatorOne(Creator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key", label="Number"), + NumberDef("number_key", label="Number"), ] return output def get_pre_create_attr_defs(self): output = [ - lib.BoolDef("use_selection", label="Use selection"), - 
lib.UISeparatorDef(), - lib.UILabelDef("Testing label"), - lib.FileDef("filepath", folders=True, label="Filepath"), - lib.FileDef( + BoolDef("use_selection", label="Use selection"), + UISeparatorDef(), + UILabelDef("Testing label"), + FileDef("filepath", folders=True, label="Filepath"), + FileDef( "filepath_2", multipath=True, folders=True, label="Filepath 2" ) ] diff --git a/openpype/hosts/testhost/plugins/create/test_creator_2.py b/openpype/hosts/testhost/plugins/create/test_creator_2.py index e66304a038..f54adee8a2 100644 --- a/openpype/hosts/testhost/plugins/create/test_creator_2.py +++ b/openpype/hosts/testhost/plugins/create/test_creator_2.py @@ -1,8 +1,8 @@ +from openpype.lib import NumberDef, TextDef from openpype.hosts.testhost.api import pipeline from openpype.pipeline import ( Creator, CreatedInstance, - lib ) @@ -40,8 +40,8 @@ class TestCreatorTwo(Creator): def get_instance_attr_defs(self): output = [ - lib.NumberDef("number_key"), - lib.TextDef("text_key") + NumberDef("number_key"), + TextDef("text_key") ] return output diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py index 3c035eccb6..c7241a15a8 100644 --- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py +++ b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py @@ -1,10 +1,8 @@ import json import pyblish.api -from openpype.pipeline import ( - OpenPypePyblishPluginMixin, - attribute_definitions -) +from openpype.lib import attribute_definitions +from openpype.pipeline import OpenPypePyblishPluginMixin class CollectInstanceOneTestHost( diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 836a0b4ed1..ec880a1abc 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -10,12 +10,16 @@ import pyblish.api import avalon.api from avalon import io -from avalon.pipeline import AVALON_CONTAINER_ID from 
openpype.hosts import tvpaint from openpype.api import get_current_project_settings from openpype.lib import register_event_callback -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from .lib import ( execute_george, @@ -77,7 +81,7 @@ def install(): pyblish.api.register_host("tvpaint") pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + register_loader_plugin_path(LOAD_PATH) avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) registered_callbacks = ( @@ -99,7 +103,7 @@ def uninstall(): log.info("OpenPype - Uninstalling TVPaint integration") pyblish.api.deregister_host("tvpaint") pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + deregister_loader_plugin_path(LOAD_PATH) avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index 8510794f06..15ad8905e0 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -1,9 +1,10 @@ import re import uuid -import avalon.api - -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) from openpype.hosts.tvpaint.api import ( pipeline, lib @@ -74,7 +75,7 @@ class Creator(LegacyCreator): self.write_instances(data) -class Loader(avalon.api.Loader): +class Loader(LoaderPlugin): hosts = ["tvpaint"] @staticmethod diff --git a/openpype/hosts/tvpaint/api/workio.py b/openpype/hosts/tvpaint/api/workio.py index c513bec6cf..88bdd7117e 100644 --- a/openpype/hosts/tvpaint/api/workio.py +++ b/openpype/hosts/tvpaint/api/workio.py @@ -4,6 +4,7 @@ """ from avalon import api +from openpype.pipeline import HOST_WORKFILE_EXTENSIONS from .lib import ( execute_george, execute_george_through_file @@ 
-47,7 +48,7 @@ def has_unsaved_changes(): def file_extensions(): """Return the supported file extensions for Blender scene files.""" - return api.HOST_WORKFILE_EXTENSIONS["tvpaint"] + return HOST_WORKFILE_EXTENSIONS["tvpaint"] def work_root(session): diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py index 33e2a76cc9..d224cfc390 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,10 +1,11 @@ -import getpass import os from avalon import api, io from openpype.lib import ( + StringTemplate, get_workfile_template_key_from_context, - get_workdir_data + get_workdir_data, + get_last_workfile_with_version, ) from openpype.api import Anatomy from openpype.hosts.tvpaint.api import lib, pipeline, plugin @@ -67,9 +68,8 @@ class LoadWorkfile(plugin.Loader): data = get_workdir_data(project_doc, asset_doc, task_name, host_name) data["root"] = anatomy.roots - data["user"] = getpass.getuser() - template = anatomy.templates[template_key]["file"] + file_template = anatomy.templates[template_key]["file"] # Define saving file extension if current_file: @@ -81,11 +81,12 @@ class LoadWorkfile(plugin.Loader): data["ext"] = extension - work_root = api.format_template_with_optional_keys( - data, anatomy.templates[template_key]["folder"] + folder_template = anatomy.templates[template_key]["folder"] + work_root = StringTemplate.format_strict_template( + folder_template, data ) - version = api.last_workfile_with_version( - work_root, template, data, host.file_extensions() + version = get_last_workfile_with_version( + work_root, file_template, data, host.file_extensions() )[1] if version is None: @@ -95,8 +96,8 @@ class LoadWorkfile(plugin.Loader): data["version"] = version - path = os.path.join( - work_root, - api.format_template_with_optional_keys(data, template) + filename = StringTemplate.format_strict_template( + file_template, data ) + path = 
os.path.join(work_root, filename) host.save_file(path) diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 8ab19bd697..713c588976 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -4,10 +4,14 @@ import logging from typing import List import pyblish.api -from avalon.pipeline import AVALON_CONTAINER_ID from avalon import api -from openpype.pipeline import LegacyCreator +from openpype.pipeline import ( + LegacyCreator, + register_loader_plugin_path, + deregister_loader_plugin_path, + AVALON_CONTAINER_ID, +) from openpype.tools.utils import host_tools import openpype.hosts.unreal @@ -44,7 +48,7 @@ def install(): print("-=" * 40) logger.info("installing OpenPype for Unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) - api.register_plugin_path(api.Loader, str(LOAD_PATH)) + register_loader_plugin_path(str(LOAD_PATH)) api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) _register_callbacks() _register_events() @@ -53,7 +57,7 @@ def install(): def uninstall(): """Uninstall Unreal configuration for Avalon.""" pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) - api.deregister_plugin_path(api.Loader, str(LOAD_PATH)) + deregister_loader_plugin_path(str(LOAD_PATH)) api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index dd2e7750f0..b24bab831d 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- from abc import ABC -from openpype.pipeline import LegacyCreator -import avalon.api +from openpype.pipeline import ( + LegacyCreator, + LoaderPlugin, +) class Creator(LegacyCreator): @@ -10,6 +12,6 @@ class Creator(LegacyCreator): defaults = ['Main'] -class Loader(avalon.api.Loader, ABC): +class Loader(LoaderPlugin, ABC): """This serves as skeleton for future OpenPype specific functionality""" pass diff 
--git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py index 027e9f4cd3..6ac3531b40 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -2,7 +2,10 @@ """Loader for published alembics.""" import os -from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -116,7 +119,7 @@ class PointCacheAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -140,7 +143,7 @@ class PointCacheAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py index 0236bab138..b2c3889f68 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -2,7 +2,10 @@ """Load Skeletal Mesh alembics.""" import os -from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -80,7 +83,7 @@ class SkeletalMeshAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": 
pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -104,7 +107,7 @@ class SkeletalMeshAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 3bcc8b476f..5a73c72c64 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -2,7 +2,10 @@ """Loader for Static Mesh alembics.""" import os -from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -99,7 +102,7 @@ class StaticMeshAlembicLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -123,7 +126,7 @@ class StaticMeshAlembicLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index 63c734b969..c9a1633031 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -3,7 +3,10 
@@ import os import json -from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -134,7 +137,7 @@ class AnimationFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -173,7 +176,7 @@ class AnimationFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index 0de9470ef9..40bca0b0c7 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -2,7 +2,8 @@ """Load camera from FBX.""" import os -from avalon import io, pipeline +from avalon import io +from openpype.pipeline import AVALON_CONTAINER_ID from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -116,7 +117,7 @@ class CameraLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index b802f5940a..7f6ce7d822 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -11,7 +11,13 @@ from unreal import AssetToolsHelpers from unreal import FBXImportType 
from unreal import MathLibrary as umath -from avalon import api, pipeline +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + get_representation_path, + AVALON_CONTAINER_ID, +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -205,7 +211,7 @@ class LayoutLoader(plugin.Loader): with open(lib_path, "r") as fp: data = json.load(fp) - all_loaders = api.discover(api.Loader) + all_loaders = discover_loader_plugins() if not loaded: loaded = [] @@ -235,7 +241,7 @@ class LayoutLoader(plugin.Loader): loaded.append(reference) family = element.get('family') - loaders = api.loaders_from_representation( + loaders = loaders_from_representation( all_loaders, reference) loader = None @@ -252,7 +258,7 @@ class LayoutLoader(plugin.Loader): "asset_dir": asset_dir } - assets = api.load( + assets = load_container( loader, reference, namespace=instance_name, @@ -387,7 +393,7 @@ class LayoutLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -411,9 +417,9 @@ class LayoutLoader(plugin.Loader): def update(self, container, representation): ar = unreal.AssetRegistryHelpers.get_asset_registry() - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] - lib_path = Path(api.get_representation_path(representation)) + lib_path = Path(get_representation_path(representation)) self._remove_actors(destination_path) diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_rig.py index a7ecb0ef7d..ff844a5e94 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_rig.py @@ -2,7 +2,10 @@ """Load Skeletal Meshes form FBX.""" import os 
-from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -100,7 +103,7 @@ class SkeletalMeshFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -124,7 +127,7 @@ class SkeletalMeshFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = unreal.AssetImportTask() diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index c8a6964ffb..282d249947 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -2,7 +2,10 @@ """Load Static meshes form FBX.""" import os -from avalon import api, pipeline +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa @@ -94,7 +97,7 @@ class StaticMeshFBXLoader(plugin.Loader): data = { "schema": "openpype:container-2.0", - "id": pipeline.AVALON_CONTAINER_ID, + "id": AVALON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -118,7 +121,7 @@ class StaticMeshFBXLoader(plugin.Loader): def update(self, container, representation): name = container["asset_name"] - source_path = api.get_representation_path(representation) + source_path = get_representation_path(representation) destination_path = container["namespace"] task = 
self.get_task(source_path, destination_path, name, True) diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index 2d09b0e7bd..f34a47b89f 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -3,6 +3,8 @@ import os import json import math +from bson.objectid import ObjectId + import unreal from unreal import EditorLevelLibrary as ell from unreal import EditorAssetLibrary as eal @@ -62,7 +64,7 @@ class ExtractLayout(openpype.api.Extractor): blend = io.find_one( { "type": "representation", - "parent": io.ObjectId(parent), + "parent": ObjectId(parent), "name": "blend" }, projection={"_id": True}) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index a0eaef03ef..dbeb628073 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -5,7 +5,6 @@ from avalon import api as avalon from avalon import io from pyblish import api as pyblish import openpype.hosts.webpublisher -from openpype.pipeline import LegacyCreator log = logging.getLogger("openpype.hosts.webpublisher") @@ -13,16 +12,12 @@ HOST_DIR = os.path.dirname(os.path.abspath( openpype.hosts.webpublisher.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") def install(): print("Installing Pype config...") pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) log.info(PUBLISH_PATH) io.install() @@ -30,8 +25,6 @@ def install(): def uninstall(): pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) # to 
have required methods for interface diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index afd6f349db..65cef14703 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -14,8 +14,12 @@ import math from avalon import io import pyblish.api -from openpype.lib import prepare_template_data, get_asset, ffprobe_streams -from openpype.lib.vendor_bin_utils import get_fps +from openpype.lib import ( + prepare_template_data, + get_asset, + get_ffprobe_streams, + convert_ffprobe_fps_value, +) from openpype.lib.plugin_tools import ( parse_json, get_subset_name_with_asset_doc @@ -265,7 +269,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): def _get_number_of_frames(self, file_url): """Return duration in frames""" try: - streams = ffprobe_streams(file_url, self.log) + streams = get_ffprobe_streams(file_url, self.log) except Exception as exc: raise AssertionError(( "FFprobe couldn't read information about input file: \"{}\"." 
@@ -288,7 +292,9 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): "nb_frames {} not convertible".format(nb_frames)) duration = stream.get("duration") - frame_rate = get_fps(stream.get("r_frame_rate", '0/0')) + frame_rate = convert_ffprobe_fps_value( + stream.get("r_frame_rate", '0/0') + ) self.log.debug("duration:: {} frame_rate:: {}".format( duration, frame_rate)) try: diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 47c69265b0..e8b6d18f4e 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -26,10 +26,24 @@ from .vendor_bin_utils import ( get_vendor_bin_path, get_oiio_tools_path, get_ffmpeg_tool_path, - ffprobe_streams, is_oiio_supported ) +from .attribute_definitions import ( + AbtractAttrDef, + + UIDef, + UISeparatorDef, + UILabelDef, + + UnknownDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef, +) + from .env_tools import ( env_value_to_bool, get_paths_from_environ, @@ -90,7 +104,12 @@ from .profiles_filtering import ( from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, - convert_for_ffmpeg + convert_for_ffmpeg, + get_ffprobe_data, + get_ffprobe_streams, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, + convert_ffprobe_fps_value, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, @@ -110,6 +129,8 @@ from .avalon_context import ( get_workdir_data, get_workdir, get_workdir_with_workdir_data, + get_last_workfile_with_version, + get_last_workfile, create_workfile_doc, save_workfile_data_to_doc, @@ -225,9 +246,21 @@ __all__ = [ "get_vendor_bin_path", "get_oiio_tools_path", "get_ffmpeg_tool_path", - "ffprobe_streams", "is_oiio_supported", + "AbtractAttrDef", + + "UIDef", + "UISeparatorDef", + "UILabelDef", + + "UnknownDef", + "NumberDef", + "TextDef", + "EnumDef", + "BoolDef", + "FileDef", + "import_filepath", "modules_from_path", "recursive_bases_from_class", @@ -237,6 +270,11 @@ __all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", 
"convert_for_ffmpeg", + "get_ffprobe_data", + "get_ffprobe_streams", + "get_ffmpeg_codec_args", + "get_ffmpeg_format_args", + "convert_ffprobe_fps_value", "CURRENT_DOC_SCHEMAS", "PROJECT_NAME_ALLOWED_SYMBOLS", @@ -255,6 +293,8 @@ __all__ = [ "get_workdir_data", "get_workdir", "get_workdir_with_workdir_data", + "get_last_workfile_with_version", + "get_last_workfile", "create_workfile_doc", "save_workfile_data_to_doc", diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index ef175ac89a..e72585c75a 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -28,7 +28,8 @@ from .local_settings import get_openpype_username from .avalon_context import ( get_workdir_data, get_workdir_with_workdir_data, - get_workfile_template_key + get_workfile_template_key, + get_last_workfile ) from .python_module_tools import ( @@ -1544,6 +1545,7 @@ def _prepare_last_workfile(data, workdir): workdir (str): Path to folder where workfiles should be stored. """ import avalon.api + from openpype.pipeline import HOST_WORKFILE_EXTENSIONS log = data["log"] @@ -1592,7 +1594,7 @@ def _prepare_last_workfile(data, workdir): # Last workfile path last_workfile_path = data.get("last_workfile_path") or "" if not last_workfile_path: - extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(app.host_name) + extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) if extensions: anatomy = data["anatomy"] project_settings = data["project_settings"] @@ -1609,7 +1611,7 @@ def _prepare_last_workfile(data, workdir): "ext": extensions[0] }) - last_workfile_path = avalon.api.last_workfile( + last_workfile_path = get_last_workfile( workdir, file_template, workdir_data, extensions, True ) diff --git a/openpype/pipeline/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py similarity index 100% rename from openpype/pipeline/lib/attribute_definitions.py rename to openpype/lib/attribute_definitions.py diff --git a/openpype/lib/avalon_context.py 
b/openpype/lib/avalon_context.py index 03ad69a5e6..05d2ffd821 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -9,6 +9,8 @@ import collections import functools import getpass +from bson.objectid import ObjectId + from openpype.settings import ( get_project_settings, get_system_settings @@ -16,6 +18,7 @@ from openpype.settings import ( from .anatomy import Anatomy from .profiles_filtering import filter_profiles from .events import emit_event +from .path_templates import StringTemplate # avalon module is not imported at the top # - may not be in path at the time of pype.lib initialization @@ -168,7 +171,7 @@ def any_outdated(): representation_doc = avalon.io.find_one( { - "_id": avalon.io.ObjectId(representation), + "_id": ObjectId(representation), "type": "representation" }, projection={"parent": True} @@ -980,6 +983,8 @@ class BuildWorkfile: ... }] """ + from openpype.pipeline import discover_loader_plugins + # Get current asset name and entity current_asset_name = avalon.io.Session["AVALON_ASSET"] current_asset_entity = avalon.io.find_one({ @@ -996,7 +1001,7 @@ class BuildWorkfile: # Prepare available loaders loaders_by_name = {} - for loader in avalon.api.discover(avalon.api.Loader): + for loader in discover_loader_plugins(): loader_name = loader.__name__ if loader_name in loaders_by_name: raise KeyError( @@ -1390,6 +1395,11 @@ class BuildWorkfile: Returns: (list) Objects of loaded containers. """ + from openpype.pipeline import ( + IncompatibleLoaderError, + load_container, + ) + loaded_containers = [] # Get subset id order from build presets. 
@@ -1451,7 +1461,7 @@ class BuildWorkfile: if not loader: continue try: - container = avalon.api.load( + container = load_container( loader, repre["_id"], name=subset_name @@ -1460,7 +1470,7 @@ class BuildWorkfile: is_loaded = True except Exception as exc: - if exc == avalon.pipeline.IncompatibleLoaderError: + if exc == IncompatibleLoaderError: self.log.info(( "Loader `{}` is not compatible with" " representation `{}`" @@ -1728,8 +1738,6 @@ def get_custom_workfile_template_by_context( context. (Existence of formatted path is not validated.) """ - from openpype.lib import filter_profiles - if anatomy is None: anatomy = Anatomy(project_doc["name"]) @@ -1752,7 +1760,9 @@ def get_custom_workfile_template_by_context( # there are some anatomy template strings if matching_item: template = matching_item["path"][platform.system().lower()] - return template.format(**anatomy_context_data) + return StringTemplate.format_strict_template( + template, anatomy_context_data + ) return None @@ -1840,3 +1850,124 @@ def get_custom_workfile_template(template_profiles): io.Session["AVALON_TASK"], io ) + + +def get_last_workfile_with_version( + workdir, file_template, fill_data, extensions +): + """Return last workfile version. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(dict): Data for filling template. + extensions(list, tuple): All allowed file extensions of workfile. + + Returns: + tuple: Last workfile with version if there is any otherwise + returns (None, None). + """ + if not os.path.exists(workdir): + return None, None + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[1] in extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + _ext = [] + for ext in extensions: + if not ext.startswith("."): + ext = "." 
+ ext + # Escape dot for regex + ext = "\\" + ext + _ext.append(ext) + ext_expression = "(?:" + "|".join(_ext) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. + kwargs = {} + if platform.system().lower() == "windows": + kwargs["flags"] = re.IGNORECASE + + # Get highest version among existing matching files + version = None + output_filenames = [] + for filename in sorted(filenames): + match = re.match(file_template, filename, **kwargs) + if not match: + continue + + file_version = int(match.group(1)) + if version is None or file_version > version: + output_filenames[:] = [] + version = file_version + + if file_version == version: + output_filenames.append(filename) + + output_filename = None + if output_filenames: + if len(output_filenames) == 1: + output_filename = output_filenames[0] + else: + last_time = None + for _output_filename in output_filenames: + full_path = os.path.join(workdir, _output_filename) + mod_time = os.path.getmtime(full_path) + if last_time is None or last_time < mod_time: + output_filename = _output_filename + last_time = mod_time + + return output_filename, version + + +def get_last_workfile( + workdir, file_template, fill_data, extensions, full_path=False +): + """Return last workfile filename. + + Returns file with version 1 if there is not workfile yet. 
+ + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(dict): Data for filling template. + extensions(list, tuple): All allowed file extensions of workfile. + full_path(bool): Full path to file is returned if set to True. + + Returns: + str: Last or first workfile as filename of full path to filename. + """ + filename, version = get_last_workfile_with_version( + workdir, file_template, fill_data, extensions + ) + if filename is None: + data = copy.deepcopy(fill_data) + data["version"] = 1 + data.pop("comment", None) + if not data.get("ext"): + data["ext"] = extensions[0] + filename = StringTemplate.format_strict_template(file_template, data) + + if full_path: + return os.path.normpath(os.path.join(workdir, filename)) + + return filename diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py index 9fc65aae8e..ffcfe9fa4d 100644 --- a/openpype/lib/delivery.py +++ b/openpype/lib/delivery.py @@ -5,19 +5,30 @@ import glob import clique import collections +from .path_templates import ( + StringTemplate, + TemplateUnsolved, +) + def collect_frames(files): """ Returns dict of source path and its frame, if from sequence - Uses clique as most precise solution + Uses clique as most precise solution, used when anatomy template that + created files is not known. + + Assumption is that frames are separated by '.', negative frames are not + allowed. 
Args: - files(list): list of source paths + files(list) or (set with single value): list of source paths Returns: (dict): {'/asset/subset_v001.0001.png': '0001', ....} """ - collections, remainder = clique.assemble(files, minimum_items=1) + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble(files, minimum_items=1, + patterns=patterns) sources_and_frames = {} if collections: @@ -46,8 +57,6 @@ def sizeof_fmt(num, suffix='B'): def path_from_representation(representation, anatomy): - from avalon import pipeline # safer importing - try: template = representation["data"]["template"] @@ -57,12 +66,10 @@ def path_from_representation(representation, anatomy): try: context = representation["context"] context["root"] = anatomy.roots - path = pipeline.format_template_with_optional_keys( - context, template - ) - path = os.path.normpath(path.replace("/", "\\")) + path = StringTemplate.format_strict_template(template, context) + return os.path.normpath(path) - except KeyError: + except TemplateUnsolved: # Template references unavailable data return None diff --git a/openpype/lib/log.py b/openpype/lib/log.py index a42faef008..f33385e0ba 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -98,6 +98,10 @@ class PypeStreamHandler(logging.StreamHandler): self.flush() except (KeyboardInterrupt, SystemExit): raise + + except OSError: + self.handleError(record) + except Exception: print(repr(record)) self.handleError(record) @@ -227,7 +231,7 @@ class PypeLogger: logger = logging.getLogger(name or "__main__") - if cls.pype_debug > 1: + if cls.pype_debug > 0: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 62bfdf774a..14e5fe59f8 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -187,6 +187,16 @@ class StringTemplate(object): result.validate() return result + @classmethod + def format_template(cls, template, data): 
+ objected_template = cls(template) + return objected_template.format(data) + + @classmethod + def format_strict_template(cls, template, data): + objected_template = cls(template) + return objected_template.format_strict(data) + @staticmethod def find_optional_parts(parts): new_parts = [] diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 19765a6f4a..f11ba56865 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -280,6 +280,7 @@ def set_plugin_attributes_from_settings( project_name (str): Name of project for which settings will be loaded. Value from environment `AVALON_PROJECT` is used if not entered. """ + from openpype.pipeline import LegacyCreator, LoaderPlugin # determine host application to use for finding presets if host_name is None: @@ -289,11 +290,11 @@ def set_plugin_attributes_from_settings( project_name = os.environ.get("AVALON_PROJECT") # map plugin superclass to preset json. Currently supported is load and - # create (avalon.api.Loader and avalon.api.Creator) + # create (LoaderPlugin and LegacyCreator) plugin_type = None - if superclass.__name__.split(".")[-1] in ("Loader", "SubsetLoader"): + if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): plugin_type = "load" - elif superclass.__name__.split(".")[-1] in ("Creator", "LegacyCreator"): + elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): plugin_type = "create" if not host_name or not project_name or plugin_type is None: diff --git a/openpype/lib/splash.txt b/openpype/lib/splash.txt deleted file mode 100644 index 833bcd4b9c..0000000000 --- a/openpype/lib/splash.txt +++ /dev/null @@ -1,413 +0,0 @@ - - - - * - - - - - - - .* - - - - - - * - .* - * - - - - . - * - .* - * - . - - . - * - .* - .* - .* - * - . - . - * - .* - .* - .* - * - . - _. - /** - \ * - \* - * - * - . - __. - ---* - \ \* - \ * - \* - * - . - \___. - /* * - \ \ * - \ \* - \ * - \* - . - |____. - /* * - \|\ * - \ \ * - \ \ * - \ \* - \/. 
- _/_____. - /* * - / \ * - \ \ * - \ \ * - \ \__* - \/__. - __________. - --*-- ___* - \ \ \/_* - \ \ __* - \ \ \_* - \ \____\* - \/____/. - \____________ . - /* ___ \* - \ \ \/_\ * - \ \ _____* - \ \ \___/* - \ \____\ * - \/____/ . - |___________ . - /* ___ \ * - \|\ \/_\ \ * - \ \ _____/ * - \ \ \___/ * - \ \____\ / * - \/____/ \. - _/__________ . - /* ___ \ * - / \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---* - \ \____\ / \__* - \/____/ \/__. - ____________ . - --*-- ___ \ * - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\* - \/____/ \/____/. - ____________ - /\ ___ \ . - \ \ \/_\ \ * - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ . - \ \ _____/ * - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ . - \ \ \___/ ---- * - \ \____\ / \____\ . - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- * - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ - \ \ \___/ ---- . 
- \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ _ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ __\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ - \ \ \___/ ---- \ \ - \ \____\ / \____\ \__\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___. - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ . - \ \ \___/ ---- \ \\ - \ \____\ / \____\ \__\\, - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ _. - \ \ \___/ ---- \ \\\ - \ \____\ / \____\ \__\\\ - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ __. - \ \ \___/ ---- \ \\ \ - \ \____\ / \____\ \__\\_/. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___. - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ . - \ \ \___/ ---- \ \\ \\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ _. - \ \ \___/ ---- \ \\ \\\ - \ \____\ / \____\ \__\\__\\. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\_. 
- \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ __. - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__. - \/____/ \/____/ - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ * - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ O* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ ..oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . p.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . Py.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYp.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPe.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . 
PYPE .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE c.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE C1.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE ClU.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE CluB.oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club .oO* - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . .. - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . . - ____________ - /\ ___ \ - \ \ \/_\ \ - \ \ _____/ ___ ___ ___ - \ \ \___/ ---- \ \\ \\ \ - \ \____\ / \____\ \__\\__\\__\ - \/____/ \/____/ . PYPE Club . diff --git a/openpype/lib/terminal_splash.py b/openpype/lib/terminal_splash.py deleted file mode 100644 index 0ba2706a27..0000000000 --- a/openpype/lib/terminal_splash.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pype terminal animation.""" -import blessed -from pathlib import Path -from time import sleep - -NO_TERMINAL = False - -try: - term = blessed.Terminal() -except AttributeError: - # this happens when blessed cannot find proper terminal. - # If so, skip printing ascii art animation. 
- NO_TERMINAL = True - - -def play_animation(): - """Play ASCII art Pype animation.""" - if NO_TERMINAL: - return - print(term.home + term.clear) - frame_size = 7 - splash_file = Path(__file__).parent / "splash.txt" - with splash_file.open("r") as sf: - animation = sf.readlines() - - animation_length = int(len(animation) / frame_size) - current_frame = 0 - for _ in range(animation_length): - frame = "".join( - scanline - for y, scanline in enumerate( - animation[current_frame: current_frame + frame_size] - ) - ) - - with term.location(0, 0): - # term.aquamarine3_bold(frame) - print(f"{term.bold}{term.aquamarine3}{frame}{term.normal}") - - sleep(0.02) - current_frame += frame_size - print(term.move_y(7)) diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 462745bcda..6bab6a8160 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -1,15 +1,18 @@ import os import re import logging +import json import collections import tempfile +import subprocess import xml.etree.ElementTree from .execute import run_subprocess from .vendor_bin_utils import ( + get_ffmpeg_tool_path, get_oiio_tools_path, - is_oiio_supported + is_oiio_supported, ) # Max length of string that is supported by ffmpeg @@ -483,3 +486,290 @@ def convert_for_ffmpeg( logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) run_subprocess(oiio_cmd, logger=logger) + + +# FFMPEG functions +def get_ffprobe_data(path_to_file, logger=None): + """Load data about entered filepath via ffprobe. 
+ + Args: + path_to_file (str): absolute path + logger (logging.Logger): injected logger, if empty new is created + """ + if not logger: + logger = logging.getLogger(__name__) + logger.info( + "Getting information about input \"{}\".".format(path_to_file) + ) + args = [ + get_ffmpeg_tool_path("ffprobe"), + "-hide_banner", + "-loglevel", "fatal", + "-show_error", + "-show_format", + "-show_streams", + "-show_programs", + "-show_chapters", + "-show_private_data", + "-print_format", "json", + path_to_file + ] + + logger.debug("FFprobe command: {}".format( + subprocess.list2cmdline(args) + )) + popen = subprocess.Popen( + args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + popen_stdout, popen_stderr = popen.communicate() + if popen_stdout: + logger.debug("FFprobe stdout:\n{}".format( + popen_stdout.decode("utf-8") + )) + + if popen_stderr: + logger.warning("FFprobe stderr:\n{}".format( + popen_stderr.decode("utf-8") + )) + + return json.loads(popen_stdout) + + +def get_ffprobe_streams(path_to_file, logger=None): + """Load streams from entered filepath via ffprobe. + + Args: + path_to_file (str): absolute path + logger (logging.Logger): injected logger, if empty new is created + """ + return get_ffprobe_data(path_to_file, logger)["streams"] + + +def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None): + """Copy format from input metadata for output. + + Args: + ffprobe_data(dict): Data received from ffprobe. + source_ffmpeg_cmd(str): Command that created input if available. 
+ """ + input_format = ffprobe_data.get("format") or {} + if input_format.get("format_name") == "mxf": + return _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd) + return [] + + +def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd): + input_format = ffprobe_data["format"] + format_tags = input_format.get("tags") or {} + product_name = format_tags.get("product_name") or "" + output = [] + if "opatom" in product_name.lower(): + output.extend(["-f", "mxf_opatom"]) + return output + + +def get_ffmpeg_codec_args(ffprobe_data, source_ffmpeg_cmd=None, logger=None): + """Copy codec from input metadata for output. + + Args: + ffprobe_data(dict): Data received from ffprobe. + source_ffmpeg_cmd(str): Command that created input if available. + """ + if logger is None: + logger = logging.getLogger(__name__) + + video_stream = None + no_audio_stream = None + for stream in ffprobe_data["streams"]: + codec_type = stream["codec_type"] + if codec_type == "video": + video_stream = stream + break + elif no_audio_stream is None and codec_type != "audio": + no_audio_stream = stream + + if video_stream is None: + if no_audio_stream is None: + logger.warning( + "Couldn't find stream that is not an audio file." + ) + return [] + logger.info( + "Didn't find video stream. Using first non audio stream." 
+ ) + video_stream = no_audio_stream + + codec_name = video_stream.get("codec_name") + # Codec "prores" + if codec_name == "prores": + return _ffmpeg_prores_codec_args(video_stream, source_ffmpeg_cmd) + + # Codec "h264" + if codec_name == "h264": + return _ffmpeg_h264_codec_args(video_stream, source_ffmpeg_cmd) + + # Coded DNxHD + if codec_name == "dnxhd": + return _ffmpeg_dnxhd_codec_args(video_stream, source_ffmpeg_cmd) + + output = [] + if codec_name: + output.extend(["-codec:v", codec_name]) + + bit_rate = video_stream.get("bit_rate") + if bit_rate: + output.extend(["-b:v", bit_rate]) + + pix_fmt = video_stream.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-g", "1"]) + + return output + + +def _ffmpeg_prores_codec_args(stream_data, source_ffmpeg_cmd): + output = [] + + tags = stream_data.get("tags") or {} + encoder = tags.get("encoder") or "" + if encoder.endswith("prores_ks"): + codec_name = "prores_ks" + + elif encoder.endswith("prores_aw"): + codec_name = "prores_aw" + + else: + codec_name = "prores" + + output.extend(["-codec:v", codec_name]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + # Rest of arguments is prores_kw specific + if codec_name == "prores_ks": + codec_tag_to_profile_map = { + "apco": "proxy", + "apcs": "lt", + "apcn": "standard", + "apch": "hq", + "ap4h": "4444", + "ap4x": "4444xq" + } + codec_tag_str = stream_data.get("codec_tag_string") + if codec_tag_str: + profile = codec_tag_to_profile_map.get(codec_tag_str) + if profile: + output.extend(["-profile:v", profile]) + + return output + + +def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd): + output = ["-codec:v", "h264"] + + # Use arguments from source if are available source arguments + if source_ffmpeg_cmd: + copy_args = ( + "-crf", + "-b:v", "-vb", + "-minrate", "-minrate:", + "-maxrate", "-maxrate:", + "-bufsize", "-bufsize:" + ) + args = source_ffmpeg_cmd.split(" ") + for idx, arg in 
enumerate(args): + if arg in copy_args: + output.extend([arg, args[idx + 1]]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-intra"]) + output.extend(["-g", "1"]) + + return output + + +def _ffmpeg_dnxhd_codec_args(stream_data, source_ffmpeg_cmd): + output = ["-codec:v", "dnxhd"] + + # Use source profile (profiles in metadata are not usable in args directly) + profile = stream_data.get("profile") or "" + # Lower profile and replace space with underscore + cleaned_profile = profile.lower().replace(" ", "_") + + # TODO validate this statement + # Looks like using 'dnxhd' profile must have set bit rate and in that case + # should be used bitrate from source. + # - related attributes 'bit_rate_defined', 'bit_rate_must_be_defined' + bit_rate_must_be_defined = True + dnx_profiles = { + "dnxhd", + "dnxhr_lb", + "dnxhr_sq", + "dnxhr_hq", + "dnxhr_hqx", + "dnxhr_444" + } + if cleaned_profile in dnx_profiles: + if cleaned_profile != "dnxhd": + bit_rate_must_be_defined = False + output.extend(["-profile:v", cleaned_profile]) + + pix_fmt = stream_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + # Use arguments from source if are available source arguments + bit_rate_defined = False + if source_ffmpeg_cmd: + # Define bitrate arguments + bit_rate_args = ("-b:v", "-vb",) + # Seprate the two variables in case something else should be copied + # from source command + copy_args = [] + copy_args.extend(bit_rate_args) + + args = source_ffmpeg_cmd.split(" ") + for idx, arg in enumerate(args): + if arg in copy_args: + if arg in bit_rate_args: + bit_rate_defined = True + output.extend([arg, args[idx + 1]]) + + # Add bitrate if needed + if bit_rate_must_be_defined and not bit_rate_defined: + src_bit_rate = stream_data.get("bit_rate") + if src_bit_rate: + output.extend(["-b:v", src_bit_rate]) + + output.extend(["-g", "1"]) + return output + + +def convert_ffprobe_fps_value(str_value): + 
"""Returns (str) value of fps from ffprobe frame format (120/1)""" + if str_value == "0/0": + print("WARNING: Source has \"r_frame_rate\" value set to \"0/0\".") + return "Unknown" + + items = str_value.split("/") + if len(items) == 1: + fps = float(items[0]) + + elif len(items) == 2: + fps = float(items[0]) / float(items[1]) + + # Check if fps is integer or float number + if int(fps) == fps: + fps = int(fps) + + return str(fps) diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 3ae7430c7b..89021156b4 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -315,7 +315,7 @@ def get_usd_master_path(asset, subset, representation): ) template = project["config"]["template"]["publish"] - if isinstance(asset, dict) and "silo" in asset and "name" in asset: + if isinstance(asset, dict) and "name" in asset: # Allow explicitly passing asset document asset_doc = asset else: @@ -325,7 +325,6 @@ def get_usd_master_path(asset, subset, representation): **{ "root": api.registered_root(), "project": api.Session["AVALON_PROJECT"], - "silo": asset_doc["silo"], "asset": asset_doc["name"], "subset": subset, "representation": representation, diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index 4b11f1c046..23e28ea304 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -1,8 +1,6 @@ import os import logging -import json import platform -import subprocess log = logging.getLogger("Vendor utils") @@ -138,56 +136,6 @@ def get_ffmpeg_tool_path(tool="ffmpeg"): return find_executable(os.path.join(ffmpeg_dir, tool)) -def ffprobe_streams(path_to_file, logger=None): - """Load streams from entered filepath via ffprobe. 
- - Args: - path_to_file (str): absolute path - logger (logging.getLogger): injected logger, if empty new is created - - """ - if not logger: - logger = log - logger.info( - "Getting information about input \"{}\".".format(path_to_file) - ) - args = [ - get_ffmpeg_tool_path("ffprobe"), - "-hide_banner", - "-loglevel", "fatal", - "-show_error", - "-show_format", - "-show_streams", - "-show_programs", - "-show_chapters", - "-show_private_data", - "-print_format", "json", - path_to_file - ] - - logger.debug("FFprobe command: {}".format( - subprocess.list2cmdline(args) - )) - popen = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - - popen_stdout, popen_stderr = popen.communicate() - if popen_stdout: - logger.debug("FFprobe stdout:\n{}".format( - popen_stdout.decode("utf-8") - )) - - if popen_stderr: - logger.warning("FFprobe stderr:\n{}".format( - popen_stderr.decode("utf-8") - )) - - return json.loads(popen_stdout)["streams"] - - def is_oiio_supported(): """Checks if oiiotool is configured for this platform. 
@@ -204,23 +152,3 @@ def is_oiio_supported(): )) return False return True - - -def get_fps(str_value): - """Returns (str) value of fps from ffprobe frame format (120/1)""" - if str_value == "0/0": - print("WARNING: Source has \"r_frame_rate\" value set to \"0/0\".") - return "Unknown" - - items = str_value.split("/") - if len(items) == 1: - fps = float(items[0]) - - elif len(items) == 2: - fps = float(items[0]) / float(items[1]) - - # Check if fps is integer or float number - if int(fps) == fps: - fps = int(fps) - - return str(fps) diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 2918b54d4a..c499c14d40 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -6,6 +6,7 @@ import pyblish.api from avalon import api from openpype.lib import env_value_to_bool +from openpype.lib.delivery import collect_frames from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo @@ -102,24 +103,18 @@ class AfterEffectsSubmitDeadline( def get_plugin_info(self): deadline_plugin_info = DeadlinePluginInfo() - context = self._instance.context - script_path = context.data["currentFile"] render_path = self._instance.data["expectedFiles"][0] - if len(self._instance.data["expectedFiles"]) > 1: + file_name, frame = list(collect_frames([render_path]).items())[0] + if frame: # replace frame ('000001') with Deadline's required '[#######]' # expects filename in format project_asset_subset_version.FRAME.ext render_dir = os.path.dirname(render_path) file_name = os.path.basename(render_path) - arr = file_name.split('.') - assert len(arr) == 3, \ - "Unable to parse frames from {}".format(file_name) - hashed = '[{}]'.format(len(arr[1]) * "#") - - render_path = os.path.join(render_dir, - 
'{}.{}.{}'.format(arr[0], hashed, - arr[2])) + hashed = '[{}]'.format(len(frame) * "#") + file_name = file_name.replace(frame, hashed) + render_path = os.path.join(render_dir, file_name) deadline_plugin_info.Comp = self._instance.data["comp_name"] deadline_plugin_info.Version = self._instance.data["app_version"] diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 1de1c37575..fad4d14ea0 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -13,6 +13,8 @@ from avalon import api, io import pyblish.api +from openpype.pipeline import get_representation_path + def get_resources(version, extension=None): """Get the files from the specific version.""" @@ -23,7 +25,7 @@ def get_resources(version, extension=None): representation = io.find_one(query) assert representation, "This is a bug" - directory = api.get_representation_path(representation) + directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( [ @@ -234,6 +236,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): environment["OPENPYPE_MONGO"] = mongo_url args = [ + "--headless", 'publish', roothless_metadata_path, "--targets", "deadline", @@ -604,7 +607,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, True) + + already_there = False + for repre in instance.get("representations", []): + # might be added explicitly before by publish_on_farm + already_there = repre.get("files") == rep["files"] + if already_there: + self.log.debug("repre {} already_there".format(repre)) + break + + if not already_there: + representations.append(rep) return representations diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py 
b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index 82c2494e7a..eeb1f7744c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -46,6 +46,7 @@ def inject_openpype_environment(deadlinePlugin): args = [ openpype_app, + "--headless", 'extractenvironments', export_url ] diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index eea6436b53..46c333c4c4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -199,8 +199,10 @@ class SyncToAvalonEvent(BaseEvent): if proj: ftrack_id = proj["data"].get("ftrackId") if ftrack_id is None: - ftrack_id = self._update_project_ftrack_id() - proj["data"]["ftrackId"] = ftrack_id + self.handle_missing_ftrack_id(proj) + ftrack_id = proj["data"]["ftrackId"] + self._avalon_ents_by_ftrack_id[ftrack_id] = proj + self._avalon_ents_by_ftrack_id[ftrack_id] = proj for ent in ents: ftrack_id = ent["data"].get("ftrackId") @@ -209,15 +211,78 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_ftrack_id[ftrack_id] = ent return self._avalon_ents_by_ftrack_id - def _update_project_ftrack_id(self): - ftrack_id = self.cur_project["id"] + def handle_missing_ftrack_id(self, doc): + # TODO handling of missing ftrack id is primarily issue of editorial + # publishing it would be better to find out what causes that + # ftrack id is removed during the publishing + ftrack_id = doc["data"].get("ftrackId") + if ftrack_id is not None: + return + if doc["type"] == "project": + ftrack_id = self.cur_project["id"] + + self.dbcon.update_one( + {"type": "project"}, + {"$set": { + "data.ftrackId": ftrack_id, + "data.entityType": self.cur_project.entity_type + }} + ) + + doc["data"]["ftrackId"] = ftrack_id + 
doc["data"]["entityType"] = self.cur_project.entity_type + self.log.info("Updated ftrack id of project \"{}\"".format( + self.cur_project["full_name"] + )) + return + + if doc["type"] != "asset": + return + + doc_parents = doc.get("data", {}).get("parents") + if doc_parents is None: + return + + entities = self.process_session.query(( + "select id, link from TypedContext" + " where project_id is \"{}\" and name is \"{}\"" + ).format(self.cur_project["id"], doc["name"])).all() + self.log.info("Entities: {}".format(str(entities))) + matching_entity = None + for entity in entities: + parents = [] + for item in entity["link"]: + if item["id"] == entity["id"]: + break + low_type = item["type"].lower() + if low_type == "typedcontext": + parents.append(item["name"]) + if doc_parents == parents: + matching_entity = entity + break + + if matching_entity is None: + return + + ftrack_id = matching_entity["id"] self.dbcon.update_one( - {"type": "project"}, - {"$set": {"data.ftrackId": ftrack_id}} + {"_id": doc["_id"]}, + {"$set": { + "data.ftrackId": ftrack_id, + "data.entityType": matching_entity.entity_type + }} ) + doc["data"]["ftrackId"] = ftrack_id + doc["data"]["entityType"] = matching_entity.entity_type - return ftrack_id + entity_path_items = [] + for item in entity["link"]: + entity_path_items.append(item["name"]) + self.log.info("Updated ftrack id of entity \"{}\"".format( + "/".join(entity_path_items) + )) + self._avalon_ents_by_ftrack_id[ftrack_id] = doc @property def avalon_subsets_by_parents(self): @@ -857,7 +922,14 @@ class SyncToAvalonEvent(BaseEvent): if vis_par is None: vis_par = proj["_id"] parent_ent = self.avalon_ents_by_id[vis_par] - parent_ftrack_id = parent_ent["data"]["ftrackId"] + + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + self.handle_missing_ftrack_id(parent_ent) + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + continue + parent_ftrack_ent = 
self.ftrack_ents_by_id.get( parent_ftrack_id ) @@ -2128,7 +2200,13 @@ class SyncToAvalonEvent(BaseEvent): vis_par = avalon_ent["parent"] parent_ent = self.avalon_ents_by_id[vis_par] - parent_ftrack_id = parent_ent["data"]["ftrackId"] + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + self.handle_missing_ftrack_id(parent_ent) + parent_ftrack_id = parent_ent["data"].get("ftrackId") + if parent_ftrack_id is None: + continue + if parent_ftrack_id not in entities_dict: entities_dict[parent_ftrack_id] = { "children": [], diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index efc1e76775..96243c8c36 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -87,8 +87,8 @@ class UserAssigmentEvent(BaseEvent): if not user_id: return None, None - task = session.query('Task where id is "{}"'.format(task_id)).one() - user = session.query('User where id is "{}"'.format(user_id)).one() + task = session.query('Task where id is "{}"'.format(task_id)).first() + user = session.query('User where id is "{}"'.format(user_id)).first() return task, user diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index c66d1819ac..1b694e25f1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,11 +5,11 @@ import uuid import clique from pymongo import UpdateOne -from openpype_modules.ftrack.lib import BaseAction, statics_icon from avalon.api import AvalonMongoDB -from openpype.api import Anatomy -import avalon.pipeline +from openpype.api import Anatomy +from openpype.lib import StringTemplate, TemplateUnsolved +from 
openpype_modules.ftrack.lib import BaseAction, statics_icon class DeleteOldVersions(BaseAction): @@ -563,18 +563,16 @@ class DeleteOldVersions(BaseAction): try: context = representation["context"] context["root"] = anatomy.roots - path = avalon.pipeline.format_template_with_optional_keys( - context, template - ) + path = StringTemplate.format_strict_template(template, context) if "frame" in context: context["frame"] = self.sequence_splitter sequence_path = os.path.normpath( - avalon.pipeline.format_template_with_optional_keys( + StringTemplate.format_strict_template( context, template ) ) - except KeyError: + except (KeyError, TemplateUnsolved): # Template references unavailable data return (None, None) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py new file mode 100644 index 0000000000..3888379e04 --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -0,0 +1,494 @@ +import os +import sys +import json +import collections +import tempfile +import datetime + +import ftrack_api + +from avalon.api import AvalonMongoDB +from openpype.api import get_project_settings +from openpype.lib import ( + get_workfile_template_key, + get_workdir_data, + Anatomy, + StringTemplate, +) +from openpype_modules.ftrack.lib import BaseAction, statics_icon +from openpype_modules.ftrack.lib.avalon_sync import create_chunks + +NOT_SYNCHRONIZED_TITLE = "Not synchronized" + + +class FillWorkfileAttributeAction(BaseAction): + """Action fill work filename into custom attribute on tasks. + + Prerequirements are that the project is synchronized so it is possible to + access project anatomy and project/asset documents. Tasks that are not + synchronized are skipped too. 
+ """ + + identifier = "fill.workfile.attr" + label = "OpenPype Admin" + variant = "- Fill workfile attribute" + description = "Precalculate and fill workfile name into a custom attribute" + icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") + + settings_key = "fill_workfile_attribute" + + def discover(self, session, entities, event): + """ Validate selection. """ + is_valid = False + for ent in event["data"]["selection"]: + # Ignore entities that are not tasks or projects + if ent["entityType"].lower() in ["show", "task"]: + is_valid = True + break + + if is_valid: + is_valid = self.valid_roles(session, entities, event) + return is_valid + + def launch(self, session, entities, event): + # Separate entities and get project entity + project_entity = None + for entity in entities: + if project_entity is None: + project_entity = self.get_project_from_entity(entity) + break + + if not project_entity: + return { + "message": ( + "Couldn't find project entity." + " Could be an issue with permissions." + ), + "success": False + } + + # Get project settings and check if custom attribute where workfile + # should be set is defined. 
+ project_name = project_entity["full_name"] + project_settings = get_project_settings(project_name) + custom_attribute_key = ( + project_settings + .get("ftrack", {}) + .get("user_handlers", {}) + .get(self.settings_key, {}) + .get("custom_attribute_key") + ) + if not custom_attribute_key: + return { + "success": False, + "message": "Custom attribute key is not set in settings" + } + + # Try to find the custom attribute + # - get Task type object id + task_obj_type = session.query( + "select id from ObjectType where name is \"Task\"" + ).one() + # - get text custom attribute type + text_type = session.query( + "select id from CustomAttributeType where name is \"text\"" + ).one() + # - find the attribute + attr_conf = session.query( + ( + "select id, key from CustomAttributeConfiguration" + " where object_type_id is \"{}\"" + " and type_id is \"{}\"" + " and key is \"{}\"" + ).format( + task_obj_type["id"], text_type["id"], custom_attribute_key + ) + ).first() + if not attr_conf: + return { + "success": False, + "message": ( + "Could not find Task (text) Custom attribute \"{}\"" + ).format(custom_attribute_key) + } + + # Store report information + report = collections.defaultdict(list) + user_entity = session.query( + "User where id is {}".format(event["source"]["user"]["id"]) + ).one() + job_entity = session.create("Job", { + "user": user_entity, + "status": "running", + "data": json.dumps({ + "description": "(0/3) Fill of workfiles started" + }) + }) + session.commit() + + try: + self.in_job_process( + session, + entities, + job_entity, + project_entity, + project_settings, + attr_conf, + report + ) + except Exception: + self.log.error( + "Fill of workfiles to custom attribute failed", exc_info=True + ) + session.rollback() + + description = "Fill of workfiles Failed (Download traceback)" + self.add_traceback_to_job( + job_entity, session, sys.exc_info(), description + ) + return { + "message": ( + "Fill of workfiles failed." 
+ " Check job for more information" + ), + "success": False + } + + job_entity["status"] = "done" + job_entity["data"] = json.dumps({ + "description": "Fill of workfiles completed." + }) + session.commit() + if report: + temp_obj = tempfile.NamedTemporaryFile( + mode="w", + prefix="openpype_ftrack_", + suffix=".json", + delete=False + ) + temp_obj.close() + temp_filepath = temp_obj.name + with open(temp_filepath, "w") as temp_file: + json.dump(report, temp_file) + + component_name = "{}_{}".format( + "FillWorkfilesReport", + datetime.datetime.now().strftime("%y-%m-%d-%H%M") + ) + self.add_file_component_to_job( + job_entity, session, temp_filepath, component_name + ) + # Delete temp file + os.remove(temp_filepath) + self._show_report(event, report, project_name) + return { + "message": ( + "Fill of workfiles finished with few issues." + " Check job for more information" + ), + "success": True + } + + return { + "success": True, + "message": "Finished with filling of work filenames" + } + + def _show_report(self, event, report, project_name): + items = [] + title = "Fill workfiles report ({}):".format(project_name) + + for subtitle, lines in report.items(): + if items: + items.append({ + "type": "label", + "value": "---" + }) + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": '

{}

'.format("
".join(lines)) + }) + + self.show_interface( + items=items, + title=title, + event=event + ) + + def in_job_process( + self, + session, + entities, + job_entity, + project_entity, + project_settings, + attr_conf, + report + ): + task_entities = [] + other_entities = [] + project_selected = False + for entity in entities: + ent_type_low = entity.entity_type.lower() + if ent_type_low == "project": + project_selected = True + break + + elif ent_type_low == "task": + task_entities.append(entity) + else: + other_entities.append(entity) + + project_name = project_entity["full_name"] + + # Find matchin asset documents and map them by ftrack task entities + # - result stored to 'asset_docs_with_task_entities' is list with + # tuple `(asset document, [task entitis, ...])` + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + # Quety all asset documents + asset_docs = list(dbcon.find({"type": "asset"})) + job_entity["data"] = json.dumps({ + "description": "(1/3) Asset documents queried." + }) + session.commit() + + # When project is selected then we can query whole project + if project_selected: + asset_docs_with_task_entities = self._get_asset_docs_for_project( + session, project_entity, asset_docs, report + ) + + else: + asset_docs_with_task_entities = self._get_tasks_for_selection( + session, other_entities, task_entities, asset_docs, report + ) + + job_entity["data"] = json.dumps({ + "description": "(2/3) Queried related task entities." 
+ }) + session.commit() + + # Keep placeholders in the template unfilled + host_name = "{app}" + extension = "{ext}" + project_doc = dbcon.find_one({"type": "project"}) + project_settings = get_project_settings(project_name) + anatomy = Anatomy(project_name) + templates_by_key = {} + + operations = [] + for asset_doc, task_entities in asset_docs_with_task_entities: + for task_entity in task_entities: + workfile_data = get_workdir_data( + project_doc, asset_doc, task_entity["name"], host_name + ) + # Use version 1 for each workfile + workfile_data["version"] = 1 + workfile_data["ext"] = extension + + task_type = workfile_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, host_name, project_settings=project_settings + ) + if template_key in templates_by_key: + template = templates_by_key[template_key] + else: + template = StringTemplate( + anatomy.templates[template_key]["file"] + ) + templates_by_key[template_key] = template + + result = template.format(workfile_data) + if not result.solved: + # TODO report + pass + else: + table_values = collections.OrderedDict(( + ("configuration_id", attr_conf["id"]), + ("entity_id", task_entity["id"]) + )) + operations.append( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + table_values, + "value", + ftrack_api.symbol.NOT_SET, + str(result) + ) + ) + + if operations: + for sub_operations in create_chunks(operations, 50): + for op in sub_operations: + session.recorded_operations.push(op) + session.commit() + + job_entity["data"] = json.dumps({ + "description": "(3/3) Set custom attribute values." 
+ }) + session.commit() + + def _get_entity_path(self, entity): + path_items = [] + for item in entity["link"]: + if item["type"].lower() != "project": + path_items.append(item["name"]) + return "/".join(path_items) + + def _get_asset_docs_for_project( + self, session, project_entity, asset_docs, report + ): + asset_docs_task_names = {} + + for asset_doc in asset_docs: + asset_data = asset_doc["data"] + ftrack_id = asset_data.get("ftrackId") + if not ftrack_id: + hierarchy = list(asset_data.get("parents") or []) + hierarchy.append(asset_doc["name"]) + path = "/".join(hierarchy) + report[NOT_SYNCHRONIZED_TITLE].append(path) + continue + + asset_tasks = asset_data.get("tasks") or {} + asset_docs_task_names[ftrack_id] = ( + asset_doc, list(asset_tasks.keys()) + ) + + task_entities = session.query(( + "select id, name, parent_id, link from Task where project_id is {}" + ).format(project_entity["id"])).all() + task_entities_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + task_entities_by_parent_id[parent_id].append(task_entity) + + output = [] + for ftrack_id, item in asset_docs_task_names.items(): + asset_doc, task_names = item + valid_task_entities = [] + for task_entity in task_entities_by_parent_id[ftrack_id]: + if task_entity["name"] in task_names: + valid_task_entities.append(task_entity) + else: + path = self._get_entity_path(task_entity) + report[NOT_SYNCHRONIZED_TITLE].append(path) + + if valid_task_entities: + output.append((asset_doc, valid_task_entities)) + + return output + + def _get_tasks_for_selection( + self, session, other_entities, task_entities, asset_docs, report + ): + all_tasks = object() + asset_docs_by_ftrack_id = {} + asset_docs_by_parent_id = collections.defaultdict(list) + for asset_doc in asset_docs: + asset_data = asset_doc["data"] + ftrack_id = asset_data.get("ftrackId") + parent_id = asset_data.get("visualParent") + 
asset_docs_by_parent_id[parent_id].append(asset_doc) + if ftrack_id: + asset_docs_by_ftrack_id[ftrack_id] = asset_doc + + missing_doc_ftrack_ids = {} + all_tasks_ids = set() + task_names_by_ftrack_id = collections.defaultdict(list) + for other_entity in other_entities: + ftrack_id = other_entity["id"] + if ftrack_id not in asset_docs_by_ftrack_id: + missing_doc_ftrack_ids[ftrack_id] = None + continue + all_tasks_ids.add(ftrack_id) + task_names_by_ftrack_id[ftrack_id] = all_tasks + + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + if parent_id not in asset_docs_by_ftrack_id: + missing_doc_ftrack_ids[parent_id] = None + continue + + if all_tasks_ids not in all_tasks_ids: + task_names_by_ftrack_id[ftrack_id].append(task_entity["name"]) + + ftrack_ids = set() + asset_doc_with_task_names_by_id = {} + for ftrack_id, task_names in task_names_by_ftrack_id.items(): + asset_doc = asset_docs_by_ftrack_id[ftrack_id] + asset_data = asset_doc["data"] + asset_tasks = asset_data.get("tasks") or {} + + if task_names is all_tasks: + task_names = list(asset_tasks.keys()) + else: + new_task_names = [] + for task_name in task_names: + if task_name in asset_tasks: + new_task_names.append(task_name) + continue + + if ftrack_id not in missing_doc_ftrack_ids: + missing_doc_ftrack_ids[ftrack_id] = [] + if missing_doc_ftrack_ids[ftrack_id] is not None: + missing_doc_ftrack_ids[ftrack_id].append(task_name) + + task_names = new_task_names + + if task_names: + ftrack_ids.add(ftrack_id) + asset_doc_with_task_names_by_id[ftrack_id] = ( + asset_doc, task_names + ) + + task_entities = session.query(( + "select id, name, parent_id from Task where parent_id in ({})" + ).format(self.join_query_keys(ftrack_ids))).all() + task_entitiy_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + parent_id = task_entity["parent_id"] + task_entitiy_by_parent_id[parent_id].append(task_entity) + + output = [] + for ftrack_id, item in 
asset_doc_with_task_names_by_id.items(): + asset_doc, task_names = item + valid_task_entities = [] + for task_entity in task_entitiy_by_parent_id[ftrack_id]: + if task_entity["name"] in task_names: + valid_task_entities.append(task_entity) + else: + if ftrack_id not in missing_doc_ftrack_ids: + missing_doc_ftrack_ids[ftrack_id] = [] + if missing_doc_ftrack_ids[ftrack_id] is not None: + missing_doc_ftrack_ids[ftrack_id].append(task_name) + if valid_task_entities: + output.append((asset_doc, valid_task_entities)) + + # Store report information about not synchronized entities + if missing_doc_ftrack_ids: + missing_entities = session.query( + "select id, link from TypedContext where id in ({})".format( + self.join_query_keys(missing_doc_ftrack_ids.keys()) + ) + ).all() + for missing_entity in missing_entities: + path = self._get_entity_path(missing_entity) + task_names = missing_doc_ftrack_ids[missing_entity["id"]] + if task_names is None: + report[NOT_SYNCHRONIZED_TITLE].append(path) + else: + for task_name in task_names: + task_path = "/".join([path, task_name]) + report[NOT_SYNCHRONIZED_TITLE].append(task_path) + + return output + + +def register(session): + FillWorkfileAttributeAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index 71d790f7e7..bdb0eaf250 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -3,9 +3,10 @@ import subprocess import traceback import json -from openpype_modules.ftrack.lib import BaseAction, statics_icon import ftrack_api from avalon import io, api +from openpype.pipeline import get_representation_path +from openpype_modules.ftrack.lib import BaseAction, statics_icon class RVAction(BaseAction): @@ -307,7 +308,7 @@ class RVAction(BaseAction): "name": "preview" } ) - paths.append(api.get_representation_path(representation)) + 
paths.append(get_representation_path(representation)) return paths diff --git a/openpype/modules/ftrack/lib/ftrack_event_handler.py b/openpype/modules/ftrack/lib/ftrack_event_handler.py index af565c5421..0a70b0e301 100644 --- a/openpype/modules/ftrack/lib/ftrack_event_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_event_handler.py @@ -44,7 +44,7 @@ class BaseEvent(BaseHandler): return self._get_entities( event, session, - ignore=['socialfeed', 'socialnotification'] + ignore=['socialfeed', 'socialnotification', 'team'] ) def get_project_name_from_event(self, session, event, project_id): diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 7147e56dd2..6ed307dbc7 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -1,4 +1,7 @@ -from .lib import attribute_definitions +from .constants import ( + AVALON_CONTAINER_ID, + HOST_WORKFILE_EXTENSIONS, +) from .create import ( BaseCreator, @@ -12,6 +15,28 @@ from .create import ( legacy_create, ) +from .load import ( + HeroVersionType, + IncompatibleLoaderError, + LoaderPlugin, + SubsetLoaderPlugin, + + discover_loader_plugins, + register_loader_plugin, + deregister_loader_plugin_path, + register_loader_plugin_path, + deregister_loader_plugin, + + load_container, + remove_container, + update_container, + switch_container, + + loaders_from_representation, + get_representation_path, + get_repres_contexts, +) + from .publish import ( PublishValidationError, PublishXmlValidationError, @@ -19,10 +44,30 @@ from .publish import ( OpenPypePyblishPluginMixin ) +from .actions import ( + LauncherAction, + + InventoryAction, + + discover_launcher_actions, + register_launcher_action, + register_launcher_action_path, + + discover_inventory_actions, + register_inventory_action, + register_inventory_action_path, + deregister_inventory_action, + deregister_inventory_action_path, +) + __all__ = ( + "AVALON_CONTAINER_ID", + "HOST_WORKFILE_EXTENSIONS", + "attribute_definitions", + # --- 
Create --- "BaseCreator", "Creator", "AutoCreator", @@ -30,12 +75,48 @@ __all__ = ( "CreatorError", - # Legacy creation + # - legacy creation "LegacyCreator", "legacy_create", + # --- Load --- + "HeroVersionType", + "IncompatibleLoaderError", + "LoaderPlugin", + "SubsetLoaderPlugin", + + "discover_loader_plugins", + "register_loader_plugin", + "deregister_loader_plugin_path", + "register_loader_plugin_path", + "deregister_loader_plugin", + + "load_container", + "remove_container", + "update_container", + "switch_container", + + "loaders_from_representation", + "get_representation_path", + "get_repres_contexts", + + # --- Publish --- "PublishValidationError", "PublishXmlValidationError", "KnownPublishError", - "OpenPypePyblishPluginMixin" + "OpenPypePyblishPluginMixin", + + # --- Actions --- + "LauncherAction", + "InventoryAction", + + "discover_launcher_actions", + "register_launcher_action", + "register_launcher_action_path", + + "discover_inventory_actions", + "register_inventory_action", + "register_inventory_action_path", + "deregister_inventory_action", + "deregister_inventory_action_path", ) diff --git a/openpype/pipeline/actions.py b/openpype/pipeline/actions.py new file mode 100644 index 0000000000..a045c92aa7 --- /dev/null +++ b/openpype/pipeline/actions.py @@ -0,0 +1,146 @@ +import logging + + +class LauncherAction(object): + """A custom action available""" + name = None + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("LauncherAction") + log.propagate = True + + def is_compatible(self, session): + """Return whether the class is compatible with the Session.""" + return True + + def process(self, session, **kwargs): + pass + + +class InventoryAction(object): + """A custom action for the scene inventory tool + + If registered the action will be visible in the Right Mouse Button menu + under the submenu "Actions". 
+ + """ + + label = None + icon = None + color = None + order = 0 + + log = logging.getLogger("InventoryAction") + log.propagate = True + + @staticmethod + def is_compatible(container): + """Override function in a custom class + + This method is specifically used to ensure the action can operate on + the container. + + Args: + container(dict): the data of a loaded asset, see host.ls() + + Returns: + bool + """ + return bool(container.get("objectName")) + + def process(self, containers): + """Override function in a custom class + + This method will receive all containers even those which are + incompatible. It is advised to create a small filter along the lines + of this example: + + valid_containers = filter(self.is_compatible(c) for c in containers) + + The return value will need to be a True-ish value to trigger + the data_changed signal in order to refresh the view. + + You can return a list of container names to trigger GUI to select + treeview items. + + You can return a dict to carry extra GUI options. For example: + { + "objectNames": [container names...], + "options": {"mode": "toggle", + "clear": False} + } + Currently workable GUI options are: + - clear (bool): Clear current selection before selecting by action. + Default `True`. + - mode (str): selection mode, use one of these: + "select", "deselect", "toggle". Default is "select". 
+ + Args: + containers (list): list of dictionaries + + Return: + bool, list or dict + + """ + return True + + +# Launcher action +def discover_launcher_actions(): + import avalon.api + + return avalon.api.discover(LauncherAction) + + +def register_launcher_action(plugin): + import avalon.api + + return avalon.api.register_plugin(LauncherAction, plugin) + + +def register_launcher_action_path(path): + import avalon.api + + return avalon.api.register_plugin_path(LauncherAction, path) + + +# Inventory action +def discover_inventory_actions(): + import avalon.api + + actions = avalon.api.discover(InventoryAction) + filtered_actions = [] + for action in actions: + if action is not InventoryAction: + print("DISCOVERED", action) + filtered_actions.append(action) + else: + print("GOT SOURCE") + return filtered_actions + + +def register_inventory_action(plugin): + import avalon.api + + return avalon.api.register_plugin(InventoryAction, plugin) + + +def deregister_inventory_action(plugin): + import avalon.api + + avalon.api.deregister_plugin(InventoryAction, plugin) + + +def register_inventory_action_path(path): + import avalon.api + + return avalon.api.register_plugin_path(InventoryAction, path) + + +def deregister_inventory_action_path(path): + import avalon.api + + return avalon.api.deregister_plugin_path(InventoryAction, path) diff --git a/openpype/pipeline/constants.py b/openpype/pipeline/constants.py new file mode 100644 index 0000000000..e6496cbf95 --- /dev/null +++ b/openpype/pipeline/constants.py @@ -0,0 +1,19 @@ +# Metadata ID of loaded container into scene +AVALON_CONTAINER_ID = "pyblish.avalon.container" + +# TODO get extensions from host implementations +HOST_WORKFILE_EXTENSIONS = { + "blender": [".blend"], + "celaction": [".scn"], + "tvpaint": [".tvpp"], + "fusion": [".comp"], + "harmony": [".zip"], + "houdini": [".hip", ".hiplc", ".hipnc"], + "maya": [".ma", ".mb"], + "nuke": [".nk"], + "hiero": [".hrox"], + "photoshop": [".psd", ".psb"], + "premiere": 
[".prproj"], + "resolve": [".drp"], + "aftereffects": [".aep"] +} diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 15417a4ff8..e86cb88229 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -6,7 +6,6 @@ import inspect from uuid import uuid4 from contextlib import contextmanager -from ..lib import UnknownDef from .creator_plugins import ( BaseCreator, Creator, @@ -89,6 +88,8 @@ class AttributeValues: origin_data(dict): Values loaded from host before conversion. """ def __init__(self, attr_defs, values, origin_data=None): + from openpype.lib.attribute_definitions import UnknownDef + if origin_data is None: origin_data = copy.deepcopy(values) self._origin_data = origin_data diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index d05cdff689..cf6629047e 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -21,6 +21,7 @@ class LegacyCreator(object): dynamic_subset_keys = [] log = logging.getLogger("LegacyCreator") + log.propagate = True def __init__(self, name, asset, options=None, data=None): self.name = name # For backwards compatibility diff --git a/openpype/pipeline/lib/__init__.py b/openpype/pipeline/lib/__init__.py deleted file mode 100644 index f762c4205d..0000000000 --- a/openpype/pipeline/lib/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -from .attribute_definitions import ( - AbtractAttrDef, - - UIDef, - UISeparatorDef, - UILabelDef, - - UnknownDef, - NumberDef, - TextDef, - EnumDef, - BoolDef, - FileDef, -) - - -__all__ = ( - "AbtractAttrDef", - - "UIDef", - "UISeparatorDef", - "UILabelDef", - - "UnknownDef", - "NumberDef", - "TextDef", - "EnumDef", - "BoolDef", - "FileDef", -) diff --git a/openpype/pipeline/load/__init__.py b/openpype/pipeline/load/__init__.py new file mode 100644 index 0000000000..6e7612d4c1 --- /dev/null +++ b/openpype/pipeline/load/__init__.py @@ -0,0 +1,78 
@@ +from .utils import ( + HeroVersionType, + IncompatibleLoaderError, + + get_repres_contexts, + get_subset_contexts, + get_representation_context, + + load_with_repre_context, + load_with_subset_context, + load_with_subset_contexts, + + load_container, + remove_container, + update_container, + switch_container, + + get_loader_identifier, + + get_representation_path_from_context, + get_representation_path, + + is_compatible_loader, + + loaders_from_repre_context, + loaders_from_representation, +) + +from .plugins import ( + LoaderPlugin, + SubsetLoaderPlugin, + + discover_loader_plugins, + register_loader_plugin, + deregister_loader_plugin_path, + register_loader_plugin_path, + deregister_loader_plugin, +) + + +__all__ = ( + # utils.py + "HeroVersionType", + "IncompatibleLoaderError", + + "get_repres_contexts", + "get_subset_contexts", + "get_representation_context", + + "load_with_repre_context", + "load_with_subset_context", + "load_with_subset_contexts", + + "load_container", + "remove_container", + "update_container", + "switch_container", + + "get_loader_identifier", + + "get_representation_path_from_context", + "get_representation_path", + + "is_compatible_loader", + + "loaders_from_repre_context", + "loaders_from_representation", + + # plugins.py + "LoaderPlugin", + "SubsetLoaderPlugin", + + "discover_loader_plugins", + "register_loader_plugin", + "deregister_loader_plugin_path", + "register_loader_plugin_path", + "deregister_loader_plugin", +) diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py new file mode 100644 index 0000000000..9b2b6bb084 --- /dev/null +++ b/openpype/pipeline/load/plugins.py @@ -0,0 +1,131 @@ +import logging + +from .utils import get_representation_path_from_context + + +class LoaderPlugin(list): + """Load representation into host application + + Arguments: + context (dict): avalon-core:context-1.0 + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + + .. 
versionadded:: 4.0 + This class was introduced + + """ + + families = list() + representations = list() + order = 0 + is_multiple_contexts_compatible = False + + options = [] + + log = logging.getLogger("SubsetLoader") + log.propagate = True + + def __init__(self, context): + self.fname = self.filepath_from_context(context) + + @classmethod + def get_representations(cls): + return cls.representations + + def filepath_from_context(self, context): + return get_representation_path_from_context(context) + + def load(self, context, name=None, namespace=None, options=None): + """Load asset via database + + Arguments: + context (dict): Full parenthood of representation to load + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + options (dict, optional): Additional settings dictionary + + """ + raise NotImplementedError("Loader.load() must be " + "implemented by subclass") + + def update(self, container, representation): + """Update `container` to `representation` + + Arguments: + container (avalon-core:container-1.0): Container to update, + from `host.ls()`. + representation (dict): Update the container to this representation. + + """ + raise NotImplementedError("Loader.update() must be " + "implemented by subclass") + + def remove(self, container): + """Remove a container + + Arguments: + container (avalon-core:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted + + """ + + raise NotImplementedError("Loader.remove() must be " + "implemented by subclass") + + @classmethod + def get_options(cls, contexts): + """ + Returns static (cls) options or could collect from 'contexts'. 
+ + Args: + contexts (list): of repre or subset contexts + Returns: + (list) + """ + return cls.options or [] + + +class SubsetLoaderPlugin(LoaderPlugin): + """Load subset into host application + Arguments: + context (dict): avalon-core:context-1.0 + name (str, optional): Use pre-defined name + namespace (str, optional): Use pre-defined namespace + """ + + def __init__(self, context): + pass + + +def discover_loader_plugins(): + import avalon.api + + return avalon.api.discover(LoaderPlugin) + + +def register_loader_plugin(plugin): + import avalon.api + + return avalon.api.register_plugin(LoaderPlugin, plugin) + + +def deregister_loader_plugin_path(path): + import avalon.api + + avalon.api.deregister_plugin_path(LoaderPlugin, path) + + +def register_loader_plugin_path(path): + import avalon.api + + return avalon.api.register_plugin_path(LoaderPlugin, path) + + +def deregister_loader_plugin(plugin): + import avalon.api + + avalon.api.deregister_plugin(LoaderPlugin, plugin) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py new file mode 100644 index 0000000000..53ac6b626d --- /dev/null +++ b/openpype/pipeline/load/utils.py @@ -0,0 +1,707 @@ +import os +import platform +import copy +import getpass +import logging +import inspect +import numbers + +import six +from bson.objectid import ObjectId + +from avalon import io, schema +from avalon.api import Session, registered_root + +from openpype.lib import Anatomy + +log = logging.getLogger(__name__) + + +class HeroVersionType(object): + def __init__(self, version): + assert isinstance(version, numbers.Integral), ( + "Version is not an integer. 
\"{}\" {}".format( + version, str(type(version)) + ) + ) + self.version = version + + def __str__(self): + return str(self.version) + + def __int__(self): + return int(self.version) + + def __format__(self, format_spec): + return self.version.__format__(format_spec) + + +class IncompatibleLoaderError(ValueError): + """Error when Loader is incompatible with a representation.""" + pass + + +def get_repres_contexts(representation_ids, dbcon=None): + """Return parenthood context for representation. + + Args: + representation_ids (list): The representation ids. + dbcon (AvalonMongoDB): Mongo connection object. `avalon.io` used when + not entered. + + Returns: + dict: The full representation context by representation id. + keys are repre_id, value is dictionary with full: + asset_doc + version_doc + subset_doc + repre_doc + + """ + if not dbcon: + dbcon = io + + contexts = {} + if not representation_ids: + return contexts + + _representation_ids = [] + for repre_id in representation_ids: + if isinstance(repre_id, six.string_types): + repre_id = ObjectId(repre_id) + _representation_ids.append(repre_id) + + repre_docs = dbcon.find({ + "type": "representation", + "_id": {"$in": _representation_ids} + }) + repre_docs_by_id = {} + version_ids = set() + for repre_doc in repre_docs: + version_ids.add(repre_doc["parent"]) + repre_docs_by_id[repre_doc["_id"]] = repre_doc + + version_docs = dbcon.find({ + "type": {"$in": ["version", "hero_version"]}, + "_id": {"$in": list(version_ids)} + }) + + version_docs_by_id = {} + hero_version_docs = [] + versions_for_hero = set() + subset_ids = set() + for version_doc in version_docs: + if version_doc["type"] == "hero_version": + hero_version_docs.append(version_doc) + versions_for_hero.add(version_doc["version_id"]) + version_docs_by_id[version_doc["_id"]] = version_doc + subset_ids.add(version_doc["parent"]) + + if versions_for_hero: + _version_docs = dbcon.find({ + "type": "version", + "_id": {"$in": list(versions_for_hero)} + }) + 
_version_data_by_id = { + version_doc["_id"]: version_doc["data"] + for version_doc in _version_docs + } + + for hero_version_doc in hero_version_docs: + hero_version_id = hero_version_doc["_id"] + version_id = hero_version_doc["version_id"] + version_data = copy.deepcopy(_version_data_by_id[version_id]) + version_docs_by_id[hero_version_id]["data"] = version_data + + subset_docs = dbcon.find({ + "type": "subset", + "_id": {"$in": list(subset_ids)} + }) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = dbcon.find({ + "type": "asset", + "_id": {"$in": list(asset_ids)} + }) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = dbcon.find_one({"type": "project"}) + + for repre_id, repre_doc in repre_docs_by_id.items(): + version_doc = version_docs_by_id[repre_doc["parent"]] + subset_doc = subset_docs_by_id[version_doc["parent"]] + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc, + "version": version_doc, + "representation": repre_doc, + } + contexts[repre_id] = context + + return contexts + + +def get_subset_contexts(subset_ids, dbcon=None): + """Return parenthood context for subset. + + Provides context on subset granularity - less detail than + 'get_repre_contexts'. + Args: + subset_ids (list): The subset ids. + dbcon (AvalonMongoDB): Mongo connection object. `avalon.io` used when + not entered. + Returns: + dict: The full representation context by representation id. 
+ """ + if not dbcon: + dbcon = io + + contexts = {} + if not subset_ids: + return contexts + + _subset_ids = set() + for subset_id in subset_ids: + if isinstance(subset_id, six.string_types): + subset_id = ObjectId(subset_id) + _subset_ids.add(subset_id) + + subset_docs = dbcon.find({ + "type": "subset", + "_id": {"$in": list(_subset_ids)} + }) + subset_docs_by_id = {} + asset_ids = set() + for subset_doc in subset_docs: + subset_docs_by_id[subset_doc["_id"]] = subset_doc + asset_ids.add(subset_doc["parent"]) + + asset_docs = dbcon.find({ + "type": "asset", + "_id": {"$in": list(asset_ids)} + }) + asset_docs_by_id = { + asset_doc["_id"]: asset_doc + for asset_doc in asset_docs + } + + project_doc = dbcon.find_one({"type": "project"}) + + for subset_id, subset_doc in subset_docs_by_id.items(): + asset_doc = asset_docs_by_id[subset_doc["parent"]] + context = { + "project": { + "name": project_doc["name"], + "code": project_doc["data"].get("code") + }, + "asset": asset_doc, + "subset": subset_doc + } + contexts[subset_id] = context + + return contexts + + +def get_representation_context(representation): + """Return parenthood context for representation. + + Args: + representation (str or ObjectId or dict): The representation id + or full representation as returned by the database. + + Returns: + dict: The full representation context. 
+ + """ + + assert representation is not None, "This is a bug" + + if isinstance(representation, (six.string_types, ObjectId)): + representation = io.find_one( + {"_id": ObjectId(str(representation))}) + + version, subset, asset, project = io.parenthood(representation) + + assert all([representation, version, subset, asset, project]), ( + "This is a bug" + ) + + context = { + "project": { + "name": project["name"], + "code": project["data"].get("code", '') + }, + "asset": asset, + "subset": subset, + "version": version, + "representation": representation, + } + + return context + + +def load_with_repre_context( + Loader, repre_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure the Loader is compatible for the representation + if not is_compatible_loader(Loader, repre_context): + raise IncompatibleLoaderError( + "Loader {} is incompatible with {}".format( + Loader.__name__, repre_context["subset"]["name"] + ) + ) + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = repre_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + Loader.__name__, repre_context["asset"]["name"] + ) + ) + + loader = Loader(repre_context) + return loader.load(repre_context, name, namespace, options) + + +def load_with_subset_context( + Loader, subset_context, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + if name is None: + name = subset_context["subset"]["name"] + + log.info( + "Running '%s' on '%s'" % ( + Loader.__name__, 
subset_context["asset"]["name"] + ) + ) + + loader = Loader(subset_context) + return loader.load(subset_context, name, namespace, options) + + +def load_with_subset_contexts( + Loader, subset_contexts, namespace=None, name=None, options=None, **kwargs +): + + # Ensure options is a dictionary when no explicit options provided + if options is None: + options = kwargs.get("data", dict()) # "data" for backward compat + + assert isinstance(options, dict), "Options must be a dictionary" + + # Fallback to subset when name is None + joined_subset_names = " | ".join( + context["subset"]["name"] + for context in subset_contexts + ) + if name is None: + name = joined_subset_names + + log.info( + "Running '{}' on '{}'".format(Loader.__name__, joined_subset_names) + ) + + loader = Loader(subset_contexts) + return loader.load(subset_contexts, name, namespace, options) + + +def load_container( + Loader, representation, namespace=None, name=None, options=None, **kwargs +): + """Use Loader to load a representation. + + Args: + Loader (Loader): The loader class to trigger. + representation (str or ObjectId or dict): The representation id + or full representation as returned by the database. + namespace (str, Optional): The namespace to assign. Defaults to None. + name (str, Optional): The name to assign. Defaults to subset name. + options (dict, Optional): Additional options to pass on to the loader. + + Returns: + The return of the `loader.load()` method. + + Raises: + IncompatibleLoaderError: When the loader is not compatible with + the representation. + + """ + + context = get_representation_context(representation) + return load_with_repre_context( + Loader, + context, + namespace=namespace, + name=name, + options=options, + **kwargs + ) + + +def get_loader_identifier(loader): + """Loader identifier from loader plugin or object. + + Identifier should be stored to container for future management. 
+ """ + if not inspect.isclass(loader): + loader = loader.__class__ + return loader.__name__ + + +def _get_container_loader(container): + """Return the Loader corresponding to the container""" + from .plugins import discover_loader_plugins + + loader = container["loader"] + for Plugin in discover_loader_plugins(): + # TODO: Ensure the loader is valid + if get_loader_identifier(Plugin) == loader: + return Plugin + return None + + +def remove_container(container): + """Remove a container""" + + Loader = _get_container_loader(container) + if not Loader: + raise RuntimeError("Can't remove container. See log for details.") + + loader = Loader(get_representation_context(container["representation"])) + return loader.remove(container) + + +def update_container(container, version=-1): + """Update a container""" + + # Compute the different version from 'representation' + current_representation = io.find_one({ + "_id": ObjectId(container["representation"]) + }) + + assert current_representation is not None, "This is a bug" + + current_version, subset, asset, project = io.parenthood( + current_representation) + + if version == -1: + new_version = io.find_one({ + "type": "version", + "parent": subset["_id"] + }, sort=[("name", -1)]) + else: + if isinstance(version, HeroVersionType): + version_query = { + "parent": subset["_id"], + "type": "hero_version" + } + else: + version_query = { + "parent": subset["_id"], + "type": "version", + "name": version + } + new_version = io.find_one(version_query) + + assert new_version is not None, "This is a bug" + + new_representation = io.find_one({ + "type": "representation", + "parent": new_version["_id"], + "name": current_representation["name"] + }) + + assert new_representation is not None, "Representation wasn't found" + + path = get_representation_path(new_representation) + assert os.path.exists(path), "Path {} doesn't exist".format(path) + + # Run update on the Loader for this container + Loader = _get_container_loader(container) + if 
not Loader: + raise RuntimeError("Can't update container. See log for details.") + + loader = Loader(get_representation_context(container["representation"])) + return loader.update(container, new_representation) + + +def switch_container(container, representation, loader_plugin=None): + """Switch a container to representation + + Args: + container (dict): container information + representation (dict): representation data from document + + Returns: + function call + """ + + # Get the Loader for this container + if loader_plugin is None: + loader_plugin = _get_container_loader(container) + + if not loader_plugin: + raise RuntimeError("Can't switch container. See log for details.") + + if not hasattr(loader_plugin, "switch"): + # Backwards compatibility (classes without switch support + # might be better to just have "switch" raise NotImplementedError + # on the base class of Loader\ + raise RuntimeError("Loader '{}' does not support 'switch'".format( + loader_plugin.label + )) + + # Get the new representation to switch to + new_representation = io.find_one({ + "type": "representation", + "_id": representation["_id"], + }) + + new_context = get_representation_context(new_representation) + if not is_compatible_loader(loader_plugin, new_context): + raise AssertionError("Must be compatible Loader") + + loader = loader_plugin(new_context) + + return loader.switch(container, new_representation) + + +def get_representation_path_from_context(context): + """Preparation wrapper using only context as a argument""" + representation = context['representation'] + project_doc = context.get("project") + root = None + session_project = Session.get("AVALON_PROJECT") + if project_doc and project_doc["name"] != session_project: + anatomy = Anatomy(project_doc["name"]) + root = anatomy.roots + + return get_representation_path(representation, root) + + +def get_representation_path(representation, root=None, dbcon=None): + """Get filename from representation document + + There are three 
ways of getting the path from representation which are + tried in following sequence until successful. + 1. Get template from representation['data']['template'] and data from + representation['context']. Then format template with the data. + 2. Get template from project['config'] and format it with default data set + 3. Get representation['data']['path'] and use it directly + + Args: + representation(dict): representation document from the database + + Returns: + str: fullpath of the representation + + """ + + from openpype.lib import StringTemplate, TemplateUnsolved + + if dbcon is None: + dbcon = io + + if root is None: + root = registered_root() + + def path_from_represenation(): + try: + template = representation["data"]["template"] + except KeyError: + return None + + try: + context = representation["context"] + context["root"] = root + path = StringTemplate.format_strict_template( + template, context + ) + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + except (TemplateUnsolved, KeyError): + # Template references unavailable data + return None + + if not path: + return path + + normalized_path = os.path.normpath(path) + if os.path.exists(normalized_path): + return normalized_path + return path + + def path_from_config(): + try: + version_, subset, asset, project = dbcon.parenthood(representation) + except ValueError: + log.debug( + "Representation %s wasn't found in database, " + "like a bug" % representation["name"] + ) + return None + + try: + template = project["config"]["template"]["publish"] + except KeyError: + log.debug( + "No template in project %s, " + "likely a bug" % project["name"] + ) + return None + + # default list() in get would not discover missing parents on asset + parents = asset.get("data", {}).get("parents") + if parents is not None: + hierarchy = "/".join(parents) + + # Cannot fail, required members only + data = { + "root": root, + 
"project": { + "name": project["name"], + "code": project.get("data", {}).get("code") + }, + "asset": asset["name"], + "hierarchy": hierarchy, + "subset": subset["name"], + "version": version_["name"], + "representation": representation["name"], + "family": representation.get("context", {}).get("family"), + "user": dbcon.Session.get("AVALON_USER", getpass.getuser()), + "app": dbcon.Session.get("AVALON_APP", ""), + "task": dbcon.Session.get("AVALON_TASK", "") + } + + try: + template_obj = StringTemplate(template) + path = str(template_obj.format(data)) + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + + except KeyError as e: + log.debug("Template references unavailable data: %s" % e) + return None + + normalized_path = os.path.normpath(path) + if os.path.exists(normalized_path): + return normalized_path + return path + + def path_from_data(): + if "path" not in representation["data"]: + return None + + path = representation["data"]["path"] + # Force replacing backslashes with forward slashed if not on + # windows + if platform.system().lower() != "windows": + path = path.replace("\\", "/") + + if os.path.exists(path): + return os.path.normpath(path) + + dir_path, file_name = os.path.split(path) + if not os.path.exists(dir_path): + return + + base_name, ext = os.path.splitext(file_name) + file_name_items = None + if "#" in base_name: + file_name_items = [part for part in base_name.split("#") if part] + elif "%" in base_name: + file_name_items = base_name.split("%") + + if not file_name_items: + return + + filename_start = file_name_items[0] + + for _file in os.listdir(dir_path): + if _file.startswith(filename_start) and _file.endswith(ext): + return os.path.normpath(path) + + return ( + path_from_represenation() or + path_from_config() or + path_from_data() + ) + + +def is_compatible_loader(Loader, context): + """Return whether a loader is compatible with a 
context. + + This checks the version's families and the representation for the given + Loader. + + Returns: + bool + + """ + maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) + if maj_version < 3: + families = context["version"]["data"].get("families", []) + else: + families = context["subset"]["data"]["families"] + + representation = context["representation"] + has_family = ( + "*" in Loader.families or any( + family in Loader.families for family in families + ) + ) + representations = Loader.get_representations() + has_representation = ( + "*" in representations or representation["name"] in representations + ) + return has_family and has_representation + + +def loaders_from_repre_context(loaders, repre_context): + """Return compatible loaders by representation's context.""" + + return [ + loader + for loader in loaders + if is_compatible_loader(loader, repre_context) + ] + + +def loaders_from_representation(loaders, representation): + """Return all compatible loaders for a representation.""" + + context = get_representation_context(representation) + return loaders_from_repre_context(loaders, context) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py new file mode 100644 index 0000000000..12bab83be6 --- /dev/null +++ b/openpype/pipeline/thumbnail.py @@ -0,0 +1,147 @@ +import os +import copy +import logging + +log = logging.getLogger(__name__) + + +def get_thumbnail_binary(thumbnail_entity, thumbnail_type, dbcon=None): + if not thumbnail_entity: + return + + resolvers = discover_thumbnail_resolvers() + resolvers = sorted(resolvers, key=lambda cls: cls.priority) + if dbcon is None: + from avalon import io + dbcon = io + + for Resolver in resolvers: + available_types = Resolver.thumbnail_types + if ( + thumbnail_type not in available_types + and "*" not in available_types + and ( + isinstance(available_types, (list, tuple)) + and len(available_types) == 0 + ) + ): + continue + try: + instance = Resolver(dbcon) + 
result = instance.process(thumbnail_entity, thumbnail_type) + if result: + return result + + except Exception: + log.warning("Resolver {0} failed during process.".format( + Resolver.__name__ + ), exc_info=True) + + +class ThumbnailResolver(object): + """Determine how to get data from thumbnail entity. + + "priority" - determines the order of processing in `get_thumbnail_binary`, + lower number is processed earlier. + "thumbnail_types" - it is expected that thumbnails will be used in + more than one level, there is only ["thumbnail"] type at the moment + of creating this docstring but it is expected to add "ico" and "full" + in future. + """ + + priority = 100 + thumbnail_types = ["*"] + + def __init__(self, dbcon): + self._log = None + self.dbcon = dbcon + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + def process(self, thumbnail_entity, thumbnail_type): + pass + + +class TemplateResolver(ThumbnailResolver): + + priority = 90 + + def process(self, thumbnail_entity, thumbnail_type): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + return + + template = thumbnail_entity["data"].get("template") + if not template: + self.log.debug("Thumbnail entity does not have set template") + return + + project = self.dbcon.find_one( + {"type": "project"}, + { + "name": True, + "data.code": True + } + ) + + template_data = copy.deepcopy( + thumbnail_entity["data"].get("template_data") or {} + ) + template_data.update({ + "_id": str(thumbnail_entity["_id"]), + "thumbnail_type": thumbnail_type, + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "project": { + "name": project["name"], + "code": project["data"].get("code") + } + }) + + try: + filepath = os.path.normpath(template.format(**template_data)) + except KeyError: + self.log.warning(( + "Missing template data keys for template <{0}> || Data: {1}" + ).format(template, str(template_data))) + return + + if not 
os.path.exists(filepath): + self.log.warning("File does not exist \"{0}\"".format(filepath)) + return + + with open(filepath, "rb") as _file: + content = _file.read() + + return content + + +class BinaryThumbnail(ThumbnailResolver): + def process(self, thumbnail_entity, thumbnail_type): + return thumbnail_entity["data"].get("binary_data") + + +# Thumbnail resolvers +def discover_thumbnail_resolvers(): + import avalon.api + + return avalon.api.discover(ThumbnailResolver) + + +def register_thumbnail_resolver(plugin): + import avalon.api + + return avalon.api.register_plugin(ThumbnailResolver, plugin) + + +def register_thumbnail_resolver_path(path): + import avalon.api + + return avalon.api.register_plugin_path(ThumbnailResolver, path) + + +register_thumbnail_resolver(TemplateResolver) +register_thumbnail_resolver(BinaryThumbnail) diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 09448d553c..95001691e2 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,8 +1,8 @@ -from avalon import api from openpype.modules import ModulesManager +from openpype.pipeline import load -class AddSyncSite(api.Loader): +class AddSyncSite(load.LoaderPlugin): """Add sync site to representation""" representations = ["*"] families = ["*"] diff --git a/openpype/plugins/load/copy_file.py b/openpype/plugins/load/copy_file.py index bdcb4fec79..60db094cfb 100644 --- a/openpype/plugins/load/copy_file.py +++ b/openpype/plugins/load/copy_file.py @@ -1,8 +1,8 @@ -from avalon import api from openpype.style import get_default_entity_icon_color +from openpype.pipeline import load -class CopyFile(api.Loader): +class CopyFile(load.LoaderPlugin): """Copy the published file to be pasted at the desired location""" representations = ["*"] diff --git a/openpype/plugins/load/copy_file_path.py b/openpype/plugins/load/copy_file_path.py index 2041c79f6d..565d8d1ff1 100644 --- a/openpype/plugins/load/copy_file_path.py +++ 
b/openpype/plugins/load/copy_file_path.py @@ -1,9 +1,9 @@ import os -from avalon import api +from openpype.pipeline import load -class CopyFilePath(api.Loader): +class CopyFilePath(load.LoaderPlugin): """Copy published file path to clipboard""" representations = ["*"] families = ["*"] diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index fb8be0ed33..692acdec02 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -8,14 +8,14 @@ import ftrack_api import qargparse from Qt import QtWidgets, QtCore -from avalon import api from avalon.api import AvalonMongoDB -import avalon.pipeline from openpype import style +from openpype.pipeline import load +from openpype.lib import StringTemplate from openpype.api import Anatomy -class DeleteOldVersions(api.SubsetLoader): +class DeleteOldVersions(load.SubsetLoaderPlugin): """Deletes specific number of old version""" is_multiple_contexts_compatible = True @@ -90,16 +90,12 @@ class DeleteOldVersions(api.SubsetLoader): try: context = representation["context"] context["root"] = anatomy.roots - path = avalon.pipeline.format_template_with_optional_keys( - context, template - ) + path = str(StringTemplate.format_template(template, context)) if "frame" in context: context["frame"] = self.sequence_splitter - sequence_path = os.path.normpath( - avalon.pipeline.format_template_with_optional_keys( - context, template - ) - ) + sequence_path = os.path.normpath(str( + StringTemplate.format_template(template, context) + )) except KeyError: # Template references unavailable data diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 1037d6dc16..04080053e3 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -3,9 +3,9 @@ from collections import defaultdict from Qt import QtWidgets, QtCore, QtGui -from avalon import api from avalon.api import AvalonMongoDB +from 
openpype.pipeline import load from openpype.api import Anatomy, config from openpype import resources, style @@ -20,7 +20,7 @@ from openpype.lib.delivery import ( ) -class Delivery(api.SubsetLoader): +class Delivery(load.SubsetLoaderPlugin): """Export selected versions to folder structure from Template""" is_multiple_contexts_compatible = True diff --git a/openpype/plugins/load/open_djv.py b/openpype/plugins/load/open_djv.py index 4b0e8411c8..273c77c93f 100644 --- a/openpype/plugins/load/open_djv.py +++ b/openpype/plugins/load/open_djv.py @@ -1,6 +1,6 @@ import os -from avalon import api from openpype.api import ApplicationManager +from openpype.pipeline import load def existing_djv_path(): @@ -13,7 +13,8 @@ def existing_djv_path(): return djv_list -class OpenInDJV(api.Loader): + +class OpenInDJV(load.LoaderPlugin): """Open Image Sequence with system default""" djv_list = existing_djv_path() diff --git a/openpype/plugins/load/open_file.py b/openpype/plugins/load/open_file.py index 4133a64eb3..f21cd07c7f 100644 --- a/openpype/plugins/load/open_file.py +++ b/openpype/plugins/load/open_file.py @@ -2,7 +2,7 @@ import sys import os import subprocess -from avalon import api +from openpype.pipeline import load def open(filepath): @@ -15,7 +15,7 @@ def open(filepath): subprocess.call(('xdg-open', filepath)) -class Openfile(api.Loader): +class Openfile(load.LoaderPlugin): """Open Image Sequence with system default""" families = ["render2d"] diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py index aedb5d1f2f..adffec9986 100644 --- a/openpype/plugins/load/remove_site.py +++ b/openpype/plugins/load/remove_site.py @@ -1,8 +1,8 @@ -from avalon import api from openpype.modules import ModulesManager +from openpype.pipeline import load -class RemoveSyncSite(api.Loader): +class RemoveSyncSite(load.LoaderPlugin): """Remove sync site and its files on representation""" representations = ["*"] families = ["*"] diff --git 
a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index d8119846c6..6746757e5f 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,3 +1,4 @@ +from bson.objectid import ObjectId import pyblish.api from avalon import api, io @@ -35,7 +36,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): loaded_versions = [] _containers = list(host.ls()) - _repr_ids = [io.ObjectId(c["representation"]) for c in _containers] + _repr_ids = [ObjectId(c["representation"]) for c in _containers] version_by_repr = { str(doc["_id"]): doc["parent"] for doc in io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1}) @@ -46,7 +47,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": io.ObjectId(con["representation"]), + "representation": ObjectId(con["representation"]), "version": version_by_repr[con["representation"]], # _id } loaded_versions.append(version) diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index e263edd931..b062a9c4b5 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -64,7 +64,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): data["tasks"] = tasks parents = [] visualParent = None - # do not store project"s id as visualParent (silo asset) + # do not store project"s id as visualParent if self.project is not None: if self.project["_id"] != parent["_id"]: visualParent = parent["_id"] diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index b8599454ee..3ecea1f8bd 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py 
@@ -13,7 +13,7 @@ import pyblish.api import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, - ffprobe_streams, + get_ffprobe_streams, path_to_subprocess_arg, @@ -747,10 +747,14 @@ class ExtractReview(pyblish.api.InstancePlugin): collections = clique.assemble(files)[0] assert len(collections) == 1, "Multiple collections found." col = collections[0] - # do nothing if sequence is complete - if list(col.indexes)[0] == start_frame and \ - list(col.indexes)[-1] == end_frame and \ - col.is_contiguous(): + + # do nothing if no gap is found in input range + not_gap = True + for fr in range(start_frame, end_frame + 1): + if fr not in col.indexes: + not_gap = False + + if not_gap: return [] holes = col.holes() @@ -1146,7 +1150,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # NOTE Skipped using instance's resolution full_input_path_single_file = temp_data["full_input_path_single_file"] try: - streams = ffprobe_streams( + streams = get_ffprobe_streams( full_input_path_single_file, self.log ) except Exception as exc: @@ -1188,8 +1192,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # NOTE Setting only one of `width` or `heigth` is not allowed # - settings value can't have None but has value of 0 - output_width = output_width or output_def.get("width") or None - output_height = output_height or output_def.get("height") or None + output_width = output_def.get("width") or output_width or None + output_height = output_def.get("height") or output_height or None # Overscal color overscan_color_value = "black" diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 5442cf2211..505ae75169 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -1,7 +1,14 @@ import os import openpype.api -import openpype.lib import pyblish +from openpype.lib import ( + path_to_subprocess_arg, + get_ffmpeg_tool_path, + get_ffprobe_data, + 
get_ffprobe_streams, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, +) class ExtractReviewSlate(openpype.api.Extractor): @@ -24,9 +31,9 @@ class ExtractReviewSlate(openpype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - slate_streams = openpype.lib.ffprobe_streams(slate_path, self.log) + slate_streams = get_ffprobe_streams(slate_path, self.log) # Try to find first stream with defined 'width' and 'height' # - this is to avoid order of streams where audio can be as first # - there may be a better way (checking `codec_type`?)+ @@ -66,7 +73,7 @@ class ExtractReviewSlate(openpype.api.Extractor): os.path.normpath(stagingdir), repre["files"]) self.log.debug("__ input_path: {}".format(input_path)) - video_streams = openpype.lib.ffprobe_streams( + video_streams = get_ffprobe_streams( input_path, self.log ) @@ -143,7 +150,7 @@ class ExtractReviewSlate(openpype.api.Extractor): else: input_args.extend(repre["outputDef"].get('input', [])) input_args.append("-loop 1 -i {}".format( - openpype.lib.path_to_subprocess_arg(slate_path) + path_to_subprocess_arg(slate_path) )) input_args.extend([ "-r {}".format(fps), @@ -157,7 +164,7 @@ class ExtractReviewSlate(openpype.api.Extractor): output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source for whole input - codec_args = self.codec_args(repre) + codec_args = self._get_codec_args(repre) output_args.extend(codec_args) # make sure colors are correct @@ -216,12 +223,12 @@ class ExtractReviewSlate(openpype.api.Extractor): slate_v_path = slate_path.replace(".png", ext) output_args.append( - openpype.lib.path_to_subprocess_arg(slate_v_path) + path_to_subprocess_arg(slate_v_path) ) _remove_at_end.append(slate_v_path) slate_args = [ - openpype.lib.path_to_subprocess_arg(ffmpeg_path), + path_to_subprocess_arg(ffmpeg_path), " ".join(input_args), " ".join(output_args) ] @@ 
-331,7 +338,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def codec_args(self, repre): + def _get_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -345,7 +352,7 @@ class ExtractReviewSlate(openpype.api.Extractor): try: # Get information about input file via ffprobe tool - streams = openpype.lib.ffprobe_streams(full_input_path, self.log) + ffprobe_data = get_ffprobe_data(full_input_path, self.log) except Exception: self.log.warning( "Could not get codec data from input.", @@ -353,42 +360,14 @@ class ExtractReviewSlate(openpype.api.Extractor): ) return codec_args - # Try to find first stream that is not an audio - no_audio_stream = None - for stream in streams: - if stream.get("codec_type") != "audio": - no_audio_stream = stream - break + source_ffmpeg_cmd = repre.get("ffmpeg_cmd") + codec_args.extend( + get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + ) + codec_args.extend( + get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log + ) + ) - if no_audio_stream is None: - self.log.warning(( - "Couldn't find stream that is not an audio from file \"{}\"" - ).format(full_input_path)) - return codec_args - - codec_name = no_audio_stream.get("codec_name") - if codec_name: - codec_args.append("-codec:v {}".format(codec_name)) - - profile_name = no_audio_stream.get("profile") - if profile_name: - # Rest of arguments is prores_kw specific - if codec_name == "prores_ks": - codec_tag_to_profile_map = { - "apco": "proxy", - "apcs": "lt", - "apcn": "standard", - "apch": "hq", - "ap4h": "4444", - "ap4x": "4444xq" - } - codec_tag_str = no_audio_stream.get("codec_tag_string") - if codec_tag_str: - profile = codec_tag_to_profile_map.get(codec_tag_str) - if profile: - codec_args.extend(["-profile:v", profile]) - - pix_fmt = no_audio_stream.get("pix_fmt") - if pix_fmt: - codec_args.append("-pix_fmt {}".format(pix_fmt)) return codec_args diff --git 
a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 60245314f4..466606d08b 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -4,6 +4,7 @@ import clique import errno import shutil +from bson.objectid import ObjectId from pymongo import InsertOne, ReplaceOne import pyblish.api from avalon import api, io, schema @@ -161,7 +162,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_version: new_version_id = old_version["_id"] else: - new_version_id = io.ObjectId() + new_version_id = ObjectId() new_hero_version = { "_id": new_version_id, @@ -384,7 +385,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): # Create representation else: - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() bulk_writes.append( InsertOne(repre) ) @@ -420,7 +421,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): else: repre["old_id"] = repre["_id"] - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() repre["type"] = "archived_representation" bulk_writes.append( InsertOne(repre) diff --git a/openpype/plugins/publish/integrate_inputlinks.py b/openpype/plugins/publish/integrate_inputlinks.py index f973dfc963..11cffc4638 100644 --- a/openpype/plugins/publish/integrate_inputlinks.py +++ b/openpype/plugins/publish/integrate_inputlinks.py @@ -1,8 +1,10 @@ - from collections import OrderedDict -from avalon import io + +from bson.objectid import ObjectId import pyblish.api +from avalon import io + class IntegrateInputLinks(pyblish.api.ContextPlugin): """Connecting version level dependency links""" @@ -104,7 +106,7 @@ class IntegrateInputLinks(pyblish.api.ContextPlugin): # future. 
link = OrderedDict() link["type"] = link_type - link["id"] = io.ObjectId(input_id) + link["id"] = ObjectId(input_id) link["linkedBy"] = "publish" if "inputLinks" not in version_doc["data"]: diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index e8dab089af..9b2ab9c935 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -9,17 +9,19 @@ import six import re import shutil +from bson.objectid import ObjectId from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import io -from avalon.api import format_template_with_optional_keys import openpype.api from datetime import datetime # from pype.modules import ModulesManager from openpype.lib.profiles_filtering import filter_profiles from openpype.lib import ( prepare_template_data, - create_hard_link + create_hard_link, + StringTemplate, + TemplateUnsolved ) # this is needed until speedcopy for linux is fixed @@ -293,7 +295,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): bulk_writes.append(DeleteOne({"_id": repre_id})) repre["orig_id"] = repre_id - repre["_id"] = io.ObjectId() + repre["_id"] = ObjectId() repre["type"] = "archived_representation" bulk_writes.append(InsertOne(repre)) @@ -572,7 +574,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Create new id if existing representations does not match if repre_id is None: - repre_id = io.ObjectId() + repre_id = ObjectId() data = repre.get("data") or {} data.update({'path': dst, 'template': template}) @@ -781,7 +783,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): families = [instance.data["family"]] families.extend(instance.data.get("families", [])) io.update_many( - {"type": "subset", "_id": io.ObjectId(subset["_id"])}, + {"type": "subset", "_id": ObjectId(subset["_id"])}, {"$set": {"data.families": families}} ) @@ -806,7 +808,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if subset_group: io.update_many({ 'type': 
'subset', - '_id': io.ObjectId(subset_id) + '_id': ObjectId(subset_id) }, {'$set': {'data.subsetGroup': subset_group}}) def _get_subset_group(self, instance): @@ -854,9 +856,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): fill_pairs = prepare_template_data(fill_pairs) try: - filled_template = \ - format_template_with_optional_keys(fill_pairs, template) - except KeyError: + filled_template = StringTemplate.format_strict_template( + template, fill_pairs + ) + except (KeyError, TemplateUnsolved): keys = [] if fill_pairs: keys = fill_pairs.keys() @@ -1052,7 +1055,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): sync_project_presets = None rec = { - "_id": io.ObjectId(), + "_id": ObjectId(), "path": path } if size: diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 874c08064a..1f57891b84 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -5,12 +5,17 @@ import subprocess import platform import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins -import openpype.lib -from openpype.lib.vendor_bin_utils import get_fps + +from openpype.lib import ( + get_ffmpeg_tool_path, + get_ffmpeg_codec_args, + get_ffmpeg_format_args, + convert_ffprobe_fps_value, +) -ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") -ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe") +ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") +ffprobe_path = get_ffmpeg_tool_path("ffprobe") FFMPEG = ( @@ -51,157 +56,6 @@ def _get_ffprobe_data(source): return json.loads(out) -def _prores_codec_args(stream_data, source_ffmpeg_cmd): - output = [] - - tags = stream_data.get("tags") or {} - encoder = tags.get("encoder") or "" - if encoder.endswith("prores_ks"): - codec_name = "prores_ks" - - elif encoder.endswith("prores_aw"): - codec_name = "prores_aw" - - else: - codec_name = "prores" - - output.extend(["-codec:v", codec_name]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - 
output.extend(["-pix_fmt", pix_fmt]) - - # Rest of arguments is prores_kw specific - if codec_name == "prores_ks": - codec_tag_to_profile_map = { - "apco": "proxy", - "apcs": "lt", - "apcn": "standard", - "apch": "hq", - "ap4h": "4444", - "ap4x": "4444xq" - } - codec_tag_str = stream_data.get("codec_tag_string") - if codec_tag_str: - profile = codec_tag_to_profile_map.get(codec_tag_str) - if profile: - output.extend(["-profile:v", profile]) - - return output - - -def _h264_codec_args(stream_data, source_ffmpeg_cmd): - output = ["-codec:v", "h264"] - - # Use arguments from source if are available source arguments - if source_ffmpeg_cmd: - copy_args = ( - "-crf", - "-b:v", "-vb", - "-minrate", "-minrate:", - "-maxrate", "-maxrate:", - "-bufsize", "-bufsize:" - ) - args = source_ffmpeg_cmd.split(" ") - for idx, arg in enumerate(args): - if arg in copy_args: - output.extend([arg, args[idx + 1]]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - output.extend(["-intra"]) - output.extend(["-g", "1"]) - - return output - - -def _dnxhd_codec_args(stream_data, source_ffmpeg_cmd): - output = ["-codec:v", "dnxhd"] - - # Use source profile (profiles in metadata are not usable in args directly) - profile = stream_data.get("profile") or "" - # Lower profile and replace space with underscore - cleaned_profile = profile.lower().replace(" ", "_") - dnx_profiles = { - "dnxhd", - "dnxhr_lb", - "dnxhr_sq", - "dnxhr_hq", - "dnxhr_hqx", - "dnxhr_444" - } - if cleaned_profile in dnx_profiles: - output.extend(["-profile:v", cleaned_profile]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - # Use arguments from source if are available source arguments - if source_ffmpeg_cmd: - copy_args = ( - "-b:v", "-vb", - ) - args = source_ffmpeg_cmd.split(" ") - for idx, arg in enumerate(args): - if arg in copy_args: - output.extend([arg, args[idx + 1]]) - - output.extend(["-g", "1"]) - return output - - 
-def _mxf_format_args(ffprobe_data, source_ffmpeg_cmd): - input_format = ffprobe_data["format"] - format_tags = input_format.get("tags") or {} - product_name = format_tags.get("product_name") or "" - output = [] - if "opatom" in product_name.lower(): - output.extend(["-f", "mxf_opatom"]) - return output - - -def get_format_args(ffprobe_data, source_ffmpeg_cmd): - input_format = ffprobe_data.get("format") or {} - if input_format.get("format_name") == "mxf": - return _mxf_format_args(ffprobe_data, source_ffmpeg_cmd) - return [] - - -def get_codec_args(ffprobe_data, source_ffmpeg_cmd): - stream_data = ffprobe_data["streams"][0] - codec_name = stream_data.get("codec_name") - # Codec "prores" - if codec_name == "prores": - return _prores_codec_args(stream_data, source_ffmpeg_cmd) - - # Codec "h264" - if codec_name == "h264": - return _h264_codec_args(stream_data, source_ffmpeg_cmd) - - # Coded DNxHD - if codec_name == "dnxhd": - return _dnxhd_codec_args(stream_data, source_ffmpeg_cmd) - - output = [] - if codec_name: - output.extend(["-codec:v", codec_name]) - - bit_rate = stream_data.get("bit_rate") - if bit_rate: - output.extend(["-b:v", bit_rate]) - - pix_fmt = stream_data.get("pix_fmt") - if pix_fmt: - output.extend(["-pix_fmt", pix_fmt]) - - output.extend(["-g", "1"]) - - return output - - class ModifiedBurnins(ffmpeg_burnins.Burnins): ''' This is modification of OTIO FFmpeg Burnin adapter. 
@@ -592,7 +446,9 @@ def burnins_from_data( data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE) if "fps" not in data: - data["fps"] = get_fps(stream.get("r_frame_rate", "0/0")) + data["fps"] = convert_ffprobe_fps_value( + stream.get("r_frame_rate", "0/0") + ) # Check frame start and add expression if is available if frame_start is not None: @@ -703,10 +559,10 @@ def burnins_from_data( else: ffmpeg_args.extend( - get_format_args(burnin.ffprobe_data, source_ffmpeg_cmd) + get_ffmpeg_format_args(burnin.ffprobe_data, source_ffmpeg_cmd) ) ffmpeg_args.extend( - get_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd) + get_ffmpeg_codec_args(burnin.ffprobe_data, source_ffmpeg_cmd) ) # Use arguments from source if are available source arguments if source_ffmpeg_cmd: diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index 6fb6f55528..c7188b10b5 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -27,6 +27,8 @@ "ext": "exr", "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml", "xml_preset_dir": "", + "export_type": "File Sequence", + "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", "representation_add_range": true, "representation_tags": [] diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 01831efad1..89bb41a164 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -193,6 +193,11 @@ "Administrator" ] }, + "fill_workfile_attribute": { + "enabled": false, + "custom_attribute_key": "", + "role_list": [] + }, "seed_project": { "enabled": true, "role_list": [ diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 74ecf502d1..19d9a95595 100644 --- 
a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -385,6 +385,10 @@ "optional": true, "active": true }, + "ValidateRigOutSetNodeIds": { + "enabled": true, + "allow_history_only": false + }, "ValidateCameraAttributes": { "enabled": false, "optional": true, diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index b5bc44640b..76700d605d 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -28,6 +28,10 @@ class BaseEntity: def __init__(self, schema_data, *args, **kwargs): self.schema_data = schema_data + tooltip = None + if schema_data: + tooltip = schema_data.get("tooltip") + self.tooltip = tooltip # Entity id self._id = uuid4() diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index dd7601c017..fbfd699937 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -14,6 +14,7 @@ - this keys is not allowed for all inputs as they may have not reason for that - key is validated, can be only once in hierarchy but is not required - currently there are `system settings` and `project settings` +- all entities can have set `"tooltip"` key with description which will be shown in UI ## Inner schema - GUI schemas are huge json files, to be able to split whole configuration into multiple schema there's type `schema` diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index dc88d11f61..e352f8b132 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -171,6 +171,35 @@ "label": "XML preset folder (optional)", "type": "text" }, + { + "key": "export_type", + "label": "Export 
clip type", + "type": "enum", + "default": "File Sequence", + "enum_items": [ + { + "Movie": "Movie" + }, + { + "File Sequence": "File Sequence" + }, + { + "Sequence Publish": "Sequence Publish" + } + ] + + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "ignore_comment_attrs", + "label": "Ignore attributes parsed from a segment comments" + }, + { + "type": "separator" + }, { "key": "colorspace_out", "label": "Output color (imageio)", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index 6d0e2693d4..cb59e9d67e 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -589,6 +589,34 @@ } ] }, + { + "type": "dict", + "key": "fill_workfile_attribute", + "label": "Fill workfile Custom attribute", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Custom attribute must be Text type added to Task entity type" + }, + { + "type": "text", + "key": "custom_attribute_key", + "label": "Custom attribute key" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text" + } + ] + }, { "type": "dict", "key": "seed_project", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 3bec19c3d0..9f142bad09 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -195,6 +195,9 @@ { "aces_1.1": "aces_1.1" }, + { + "aces_1.2": "aces_1.2" + }, { "custom": "custom" } @@ -457,7 +460,7 @@ { "type": "text", "key": "colourPolicy", - 
"label": "Colour Policy" + "label": "Colour Policy (name or path)" }, { "type": "text", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 5c17e3db2c..2e5bc64e1c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -514,6 +514,26 @@ "label": "Validate Rig Controllers" } ] + }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ValidateRigOutSetNodeIds", + "label": "Validate Rig Out Set Node Ids", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "allow_history_only", + "label": "Allow history only" + } + ] } ] }, diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index d0461e55fb..94080e550d 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -24,7 +24,7 @@ def test_backward_compatibility(printer): from openpype.lib import get_hierarchy from openpype.lib import get_linked_assets from openpype.lib import get_latest_version - from openpype.lib import ffprobe_streams + from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tools/context_dialog/window.py b/openpype/tools/context_dialog/window.py index c8464faa3e..9e030853bf 100644 --- a/openpype/tools/context_dialog/window.py +++ b/openpype/tools/context_dialog/window.py @@ -308,7 +308,6 @@ class ContextDialog(QtWidgets.QDialog): self._validate_strict() def _set_asset_to_tasks_widget(self): - # filter None docs they are silo asset_id = self._assets_widget.get_selected_asset_id() self._tasks_widget.set_asset_id(asset_id) diff --git 
a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index fbaef05261..546bda1c34 100644 --- a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -1,6 +1,7 @@ import os -from avalon import api +from Qt import QtWidgets, QtGui + from openpype import PLUGINS_DIR from openpype import style from openpype.api import Logger, resources @@ -8,7 +9,10 @@ from openpype.lib import ( ApplictionExecutableNotFound, ApplicationLaunchFailed ) -from Qt import QtWidgets, QtGui +from openpype.pipeline import ( + LauncherAction, + register_launcher_action_path, +) def register_actions_from_paths(paths): @@ -29,14 +33,15 @@ def register_actions_from_paths(paths): print("Path was not found: {}".format(path)) continue - api.register_plugin_path(api.Action, path) + register_launcher_action_path(path) def register_config_actions(): """Register actions from the configuration for Launcher""" actions_dir = os.path.join(PLUGINS_DIR, "actions") - register_actions_from_paths([actions_dir]) + if os.path.exists(actions_dir): + register_actions_from_paths([actions_dir]) def register_environment_actions(): @@ -46,7 +51,9 @@ def register_environment_actions(): register_actions_from_paths(paths_str.split(os.pathsep)) -class ApplicationAction(api.Action): +# TODO move to 'openpype.pipeline.actions' +# - remove Qt related stuff and implement exceptions to show error in launcher +class ApplicationAction(LauncherAction): """Pype's application launcher Application action based on pype's ApplicationManager system. 
@@ -74,7 +81,7 @@ class ApplicationAction(api.Action): @property def log(self): if self._log is None: - self._log = Logger().get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log def is_compatible(self, session): diff --git a/openpype/tools/launcher/lib.py b/openpype/tools/launcher/lib.py index 68c759f295..c1392b7b8f 100644 --- a/openpype/tools/launcher/lib.py +++ b/openpype/tools/launcher/lib.py @@ -1,19 +1,3 @@ -"""Utility script for updating database with configuration files - -Until assets are created entirely in the database, this script -provides a bridge between the file-based project inventory and configuration. - -- Migrating an old project: - $ python -m avalon.inventory --extract --silo-parent=f02_prod - $ python -m avalon.inventory --upload - -- Managing an existing project: - 1. Run `python -m avalon.inventory --load` - 2. Update the .inventory.toml or .config.toml - 3. Run `python -m avalon.inventory --save` - -""" - import os from Qt import QtGui import qtawesome diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index 85d553fca4..13567e7916 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -8,12 +8,13 @@ import time import appdirs from Qt import QtCore, QtGui import qtawesome -from avalon import api + from openpype.lib import JSONSettingRegistry from openpype.lib.applications import ( CUSTOM_LAUNCH_APP_GROUPS, ApplicationManager ) +from openpype.pipeline import discover_launcher_actions from openpype.tools.utils.lib import ( DynamicQThread, get_project_icon, @@ -68,7 +69,7 @@ class ActionModel(QtGui.QStandardItemModel): def discover(self): """Set up Actions cache. 
Run this for each new project.""" # Discover all registered actions - actions = api.discover(api.Action) + actions = discover_launcher_actions() # Get available project actions and the application actions app_actions = self.get_application_actions() diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index 9f8845f30f..b73b415128 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -9,14 +9,14 @@ from openpype.tools.loader.widgets import ( ThumbnailWidget, VersionWidget, FamilyListView, - RepresentationWidget + RepresentationWidget, + SubsetWidget ) from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager from . import lib -from .widgets import LibrarySubsetWidget module = sys.modules[__name__] module.window = None @@ -92,7 +92,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # --- Middle part --- # Subsets widget - subsets_widget = LibrarySubsetWidget( + subsets_widget = SubsetWidget( dbcon, self.groups_config, self.family_config_cache, @@ -448,10 +448,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): def _set_context(self, context, refresh=True): """Set the selection in the interface using a context. The context must contain `asset` data by name. - Note: Prior to setting context ensure `refresh` is triggered so that - the "silos" are listed correctly, aside from that setting the - context will force a refresh further down because it changes - the active silo and asset. + Args: context (dict): The context to apply. Returns: @@ -463,12 +460,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): return if refresh: - # Workaround: - # Force a direct (non-scheduled) refresh prior to setting the - # asset widget's silo and asset selection to ensure it's correctly - # displaying the silo tabs. 
Calling `window.refresh()` and directly - # `window.set_context()` the `set_context()` seems to override the - # scheduled refresh and the silo tabs are not shown. self._refresh_assets() self._assets_widget.select_asset_by_name(asset_name) diff --git a/openpype/tools/libraryloader/lib.py b/openpype/tools/libraryloader/lib.py index 6a497a6a16..182b48893a 100644 --- a/openpype/tools/libraryloader/lib.py +++ b/openpype/tools/libraryloader/lib.py @@ -1,7 +1,6 @@ import os import importlib import logging -from openpype.api import Anatomy log = logging.getLogger(__name__) @@ -20,14 +19,3 @@ def find_config(): log.info("Found %s, loading.." % config) return importlib.import_module(config) - - -class RegisteredRoots: - roots_per_project = {} - - @classmethod - def registered_root(cls, project_name): - if project_name not in cls.roots_per_project: - cls.roots_per_project[project_name] = Anatomy(project_name).roots - - return cls.roots_per_project[project_name] diff --git a/openpype/tools/libraryloader/widgets.py b/openpype/tools/libraryloader/widgets.py deleted file mode 100644 index 45f9ea2048..0000000000 --- a/openpype/tools/libraryloader/widgets.py +++ /dev/null @@ -1,18 +0,0 @@ -from Qt import QtWidgets - -from .lib import RegisteredRoots -from openpype.tools.loader.widgets import SubsetWidget - - -class LibrarySubsetWidget(SubsetWidget): - def on_copy_source(self): - """Copy formatted source path to clipboard""" - source = self.data.get("source", None) - if not source: - return - - project_name = self.dbcon.Session["AVALON_PROJECT"] - root = RegisteredRoots.registered_root(project_name) - path = source.format(root=root) - clipboard = QtWidgets.QApplication.clipboard() - clipboard.setText(path) diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index d73a977ac6..923a1fabdb 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -290,7 +290,6 @@ class LoaderWindow(QtWidgets.QDialog): subsets_model.clear() 
self.clear_assets_underlines() - # filter None docs they are silo asset_ids = self._assets_widget.get_selected_asset_ids() # Start loading subsets_widget.set_loading_state( @@ -381,17 +380,9 @@ The context must contain `asset` data by name. - Note: Prior to setting context ensure `refresh` is triggered so that - the "silos" are listed correctly, aside from that setting the - context will force a refresh further down because it changes - the active silo and asset. - Args: context (dict): The context to apply. - - Returns: - None - + refresh (bool): Trigger refresh on context set. """ asset = context.get("asset", None) @@ -399,12 +390,6 @@ return if refresh: - # Workaround: - # Force a direct (non-scheduled) refresh prior to setting the - # asset widget's silo and asset selection to ensure it's correctly - # displaying the silo tabs. Calling `window.refresh()` and directly - # `window.set_context()` the `set_context()` seems to override the - # scheduled refresh and the silo tabs are not shown. 
self._refresh() self._assets_widget.select_asset_by_name(asset) diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 1007355989..6cc6fae1fb 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -7,7 +7,7 @@ from Qt import QtCore, QtGui import qtawesome from avalon import schema -from avalon.lib import HeroVersionType +from openpype.pipeline import HeroVersionType from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index f145756cc5..42fb62b632 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -1,6 +1,5 @@ import os import sys -import inspect import datetime import pprint import traceback @@ -8,9 +7,20 @@ import collections from Qt import QtWidgets, QtCore, QtGui -from avalon import api, pipeline -from avalon.lib import HeroVersionType - +from openpype.api import Anatomy +from openpype.pipeline import HeroVersionType +from openpype.pipeline.thumbnail import get_thumbnail_binary +from openpype.pipeline.load import ( + discover_loader_plugins, + SubsetLoaderPlugin, + loaders_from_repre_context, + get_repres_contexts, + get_subset_contexts, + load_with_repre_context, + load_with_subset_context, + load_with_subset_contexts, + IncompatibleLoaderError, +) from openpype.tools.utils import ( ErrorMessageBox, lib as tools_lib @@ -425,7 +435,7 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
- available_loaders = api.discover(api.Loader) + available_loaders = discover_loader_plugins() if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -435,7 +445,7 @@ class SubsetWidget(QtWidgets.QWidget): subset_loaders = [] for loader in available_loaders: # Skip if its a SubsetLoader. - if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): subset_loaders.append(loader) else: repre_loaders.append(loader) @@ -459,7 +469,7 @@ class SubsetWidget(QtWidgets.QWidget): repre_docs = repre_docs_by_version_id[version_id] for repre_doc in repre_docs: repre_context = repre_context_by_id[repre_doc["_id"]] - for loader in pipeline.loaders_from_repre_context( + for loader in loaders_from_repre_context( repre_loaders, repre_context ): @@ -515,7 +525,7 @@ class SubsetWidget(QtWidgets.QWidget): action = lib.get_no_loader_action(menu, one_item_selected) menu.addAction(action) else: - repre_contexts = pipeline.get_repres_contexts( + repre_contexts = get_repres_contexts( repre_context_by_id.keys(), self.dbcon) menu = lib.add_representation_loaders_to_menu( @@ -532,7 +542,7 @@ class SubsetWidget(QtWidgets.QWidget): self.load_started.emit() - if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): subset_ids = [] subset_version_docs = {} for item in items: @@ -541,8 +551,7 @@ class SubsetWidget(QtWidgets.QWidget): subset_version_docs[subset_id] = item["version_document"] # get contexts only for selected menu option - subset_contexts_by_id = pipeline.get_subset_contexts(subset_ids, - self.dbcon) + subset_contexts_by_id = get_subset_contexts(subset_ids, self.dbcon) subset_contexts = list(subset_contexts_by_id.values()) options = lib.get_options(action, loader, self, subset_contexts) @@ -575,8 +584,7 @@ class SubsetWidget(QtWidgets.QWidget): repre_ids.append(representation["_id"]) # get contexts only for selected menu option - repre_contexts = 
pipeline.get_repres_contexts(repre_ids, - self.dbcon) + repre_contexts = get_repres_contexts(repre_ids, self.dbcon) options = lib.get_options(action, loader, self, list(repre_contexts.values())) @@ -632,6 +640,7 @@ class VersionTextEdit(QtWidgets.QTextEdit): "source": None, "raw": None } + self._anatomy = None # Reset self.set_version(None) @@ -722,20 +731,20 @@ class VersionTextEdit(QtWidgets.QTextEdit): # Add additional actions when any text so we can assume # the version is set. if self.toPlainText().strip(): - menu.addSeparator() - action = QtWidgets.QAction("Copy source path to clipboard", - menu) + action = QtWidgets.QAction( + "Copy source path to clipboard", menu + ) action.triggered.connect(self.on_copy_source) menu.addAction(action) - action = QtWidgets.QAction("Copy raw data to clipboard", - menu) + action = QtWidgets.QAction( + "Copy raw data to clipboard", menu + ) action.triggered.connect(self.on_copy_raw) menu.addAction(action) menu.exec_(event.globalPos()) - del menu def on_copy_source(self): """Copy formatted source path to clipboard""" @@ -743,7 +752,11 @@ class VersionTextEdit(QtWidgets.QTextEdit): if not source: return - path = source.format(root=api.registered_root()) + project_name = self.dbcon.Session["AVALON_PROJECT"] + if self._anatomy is None or self._anatomy.project_name != project_name: + self._anatomy = Anatomy(project_name) + + path = source.format(root=self._anatomy.roots) clipboard = QtWidgets.QApplication.clipboard() clipboard.setText(path) @@ -763,7 +776,6 @@ class VersionTextEdit(QtWidgets.QTextEdit): class ThumbnailWidget(QtWidgets.QLabel): - aspect_ratio = (16, 9) max_width = 300 @@ -855,7 +867,7 @@ class ThumbnailWidget(QtWidgets.QLabel): if not thumbnail_ent: return - thumbnail_bin = pipeline.get_thumbnail_binary( + thumbnail_bin = get_thumbnail_binary( thumbnail_ent, "thumbnail", self.dbcon ) if not thumbnail_bin: @@ -1339,12 +1351,12 @@ class RepresentationWidget(QtWidgets.QWidget): selected_side = 
self._get_selected_side(point_index, rows) # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. - available_loaders = api.discover(api.Loader) + available_loaders = discover_loader_plugins() filtered_loaders = [] for loader in available_loaders: # Skip subset loaders - if api.SubsetLoader in inspect.getmro(loader): + if issubclass(loader, SubsetLoaderPlugin): continue if ( @@ -1370,7 +1382,7 @@ class RepresentationWidget(QtWidgets.QWidget): for item in items: repre_context = repre_context_by_id[item["_id"]] - for loader in pipeline.loaders_from_repre_context( + for loader in loaders_from_repre_context( filtered_loaders, repre_context ): @@ -1426,7 +1438,7 @@ class RepresentationWidget(QtWidgets.QWidget): action = lib.get_no_loader_action(menu) menu.addAction(action) else: - repre_contexts = pipeline.get_repres_contexts( + repre_contexts = get_repres_contexts( repre_context_by_id.keys(), self.dbcon) menu = lib.add_representation_loaders_to_menu(loaders, menu, repre_contexts) @@ -1472,8 +1484,7 @@ class RepresentationWidget(QtWidgets.QWidget): repre_ids.append(item.get("_id")) - repre_contexts = pipeline.get_repres_contexts(repre_ids, - self.dbcon) + repre_contexts = get_repres_contexts(repre_ids, self.dbcon) options = lib.get_options(action, loader, self, list(repre_contexts.values())) @@ -1540,7 +1551,7 @@ def _load_representations_by_loader(loader, repre_contexts, """Loops through list of repre_contexts and loads them with one loader Args: - loader (cls of api.Loader) - not initialized yet + loader (cls of LoaderPlugin) - not initialized yet repre_contexts (dicts) - full info about selected representations (containing repre_doc, version_doc, subset_doc, project info) options (dict) - qargparse arguments to fill OptionDialog @@ -1558,12 +1569,12 @@ def _load_representations_by_loader(loader, repre_contexts, _id = repre_context["representation"]["_id"] data = data_by_repre_id.get(_id) 
options.update(data) - pipeline.load_with_repre_context( + load_with_repre_context( loader, repre_context, options=options ) - except pipeline.IncompatibleLoaderError as exc: + except IncompatibleLoaderError as exc: print(exc) error_info.append(( "Incompatible Loader", @@ -1612,7 +1623,7 @@ def _load_subsets_by_loader(loader, subset_contexts, options, context["version"] = subset_version_docs[context["subset"]["_id"]] try: - pipeline.load_with_subset_contexts( + load_with_subset_contexts( loader, subset_contexts, options=options @@ -1638,7 +1649,7 @@ def _load_subsets_by_loader(loader, subset_contexts, options, version_doc = subset_version_docs[subset_context["subset"]["_id"]] subset_context["version"] = version_doc try: - pipeline.load_with_subset_context( + load_with_subset_context( loader, subset_context, options=options diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index 96fc28243b..78fd51c7a3 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -2,12 +2,14 @@ from collections import defaultdict import logging import os +from bson.objectid import ObjectId import maya.cmds as cmds -from openpype.hosts.maya.api import lib - from avalon import io, api +from openpype.pipeline import remove_container +from openpype.hosts.maya.api import lib + from .vray_proxies import get_alembic_ids_cache log = logging.getLogger(__name__) @@ -156,7 +158,7 @@ def create_items_from_nodes(nodes): return asset_view_items for _id, id_nodes in id_hashes.items(): - asset = io.find_one({"_id": io.ObjectId(_id)}, + asset = io.find_one({"_id": ObjectId(_id)}, projection={"name": True}) # Skip if asset id is not found @@ -206,6 +208,6 @@ def remove_unused_looks(): for container in unused: log.info("Removing unused look container: %s", container['objectName']) - api.remove(container) + remove_container(container) log.info("Finished removing unused looks. 
(see log for details)") diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index b22ec95a4d..25621fc652 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -6,12 +6,19 @@ import logging import json import six +from bson.objectid import ObjectId import alembic.Abc from maya import cmds from avalon import io, api +from openpype.pipeline import ( + load_container, + loaders_from_representation, + discover_loader_plugins, + get_representation_path, +) from openpype.hosts.maya.api import lib @@ -155,7 +162,7 @@ def get_look_relationships(version_id): "name": "json"}) # Load relationships - shader_relation = api.get_representation_path(json_representation) + shader_relation = get_representation_path(json_representation) with open(shader_relation, "r") as f: relationships = json.load(f) @@ -193,8 +200,8 @@ def load_look(version_id): log.info("Using look for the first time ...") # Load file - loaders = api.loaders_from_representation(api.discover(api.Loader), - representation_id) + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation(all_loaders, representation_id) loader = next( (i for i in loaders if i.__name__ == "LookLoader"), None) if loader is None: @@ -202,7 +209,7 @@ def load_look(version_id): # Reference the look file with lib.maintained_selection(): - container_node = api.load(loader, look_representation) + container_node = load_container(loader, look_representation) # Get container members shader_nodes = lib.get_container_members(container_node) @@ -225,7 +232,7 @@ def get_latest_version(asset_id, subset): """ subset = io.find_one({"name": subset, - "parent": io.ObjectId(asset_id), + "parent": ObjectId(asset_id), "type": "subset"}) if not subset: raise RuntimeError("Subset does not exist: %s" % subset) diff --git a/openpype/tools/pyblish_pype/control.py b/openpype/tools/pyblish_pype/control.py index 
6f89952c22..f657936b79 100644 --- a/openpype/tools/pyblish_pype/control.py +++ b/openpype/tools/pyblish_pype/control.py @@ -389,6 +389,9 @@ class Controller(QtCore.QObject): new_current_group_order ) + # Force update to the current state + self._set_state_by_order() + if self.collect_state == 0: self.collect_state = 1 self._current_state = ( diff --git a/openpype/tools/sceneinventory/model.py b/openpype/tools/sceneinventory/model.py index 6ec3601705..091d6ca925 100644 --- a/openpype/tools/sceneinventory/model.py +++ b/openpype/tools/sceneinventory/model.py @@ -5,11 +5,13 @@ from collections import defaultdict from Qt import QtCore, QtGui import qtawesome +from bson.objectid import ObjectId from avalon import api, io, schema -from avalon.lib import HeroVersionType +from openpype.pipeline import HeroVersionType from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item +from openpype.modules import ModulesManager from .lib import ( get_site_icons, @@ -17,8 +19,6 @@ from .lib import ( get_progress_for_repre ) -from openpype.modules import ModulesManager - class InventoryModel(TreeModel): """The model for the inventory""" @@ -300,7 +300,7 @@ class InventoryModel(TreeModel): for repre_id, group_dict in sorted(grouped.items()): group_items = group_dict["items"] # Get parenthood per group - representation = io.find_one({"_id": io.ObjectId(repre_id)}) + representation = io.find_one({"_id": ObjectId(repre_id)}) if not representation: not_found["representation"].append(group_items) not_found_ids.append(repre_id) diff --git a/openpype/tools/sceneinventory/switch_dialog.py b/openpype/tools/sceneinventory/switch_dialog.py index 93ea68beb4..252f5cde4c 100644 --- a/openpype/tools/sceneinventory/switch_dialog.py +++ b/openpype/tools/sceneinventory/switch_dialog.py @@ -2,8 +2,14 @@ import collections import logging from Qt import QtWidgets, QtCore import qtawesome +from bson.objectid import ObjectId -from avalon import io, api, 
pipeline +from avalon import io, pipeline +from openpype.pipeline import ( + discover_loader_plugins, + switch_container, + get_repres_contexts, +) from .widgets import ( ButtonWithMenu, @@ -141,7 +147,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_ids = set() content_loaders = set() for item in self._items: - repre_ids.add(io.ObjectId(item["representation"])) + repre_ids.add(ObjectId(item["representation"])) content_loaders.add(item["loader"]) repres = list(io.find({ @@ -343,13 +349,13 @@ class SwitchAssetDialog(QtWidgets.QDialog): def _get_loaders(self, repre_ids): repre_contexts = None if repre_ids: - repre_contexts = pipeline.get_repres_contexts(repre_ids) + repre_contexts = get_repres_contexts(repre_ids) if not repre_contexts: return list() available_loaders = [] - for loader_plugin in api.discover(api.Loader): + for loader_plugin in discover_loader_plugins(): # Skip loaders without switch method if not hasattr(loader_plugin, "switch"): continue @@ -1301,7 +1307,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_docs_by_parent_id_by_name[parent_id][name] = repre_doc for container in self._items: - container_repre_id = io.ObjectId(container["representation"]) + container_repre_id = ObjectId(container["representation"]) container_repre = self.content_repres[container_repre_id] container_repre_name = container_repre["name"] @@ -1352,7 +1358,7 @@ class SwitchAssetDialog(QtWidgets.QDialog): repre_doc = repres_by_name[container_repre_name] try: - api.switch(container, repre_doc, loader) + switch_container(container, repre_doc, loader) except Exception: msg = ( "Couldn't switch asset." 
diff --git a/openpype/tools/sceneinventory/view.py b/openpype/tools/sceneinventory/view.py index fb93faefd6..2df6d00406 100644 --- a/openpype/tools/sceneinventory/view.py +++ b/openpype/tools/sceneinventory/view.py @@ -4,11 +4,17 @@ from functools import partial from Qt import QtWidgets, QtCore import qtawesome +from bson.objectid import ObjectId -from avalon import io, api -from avalon.lib import HeroVersionType +from avalon import io from openpype import style +from openpype.pipeline import ( + HeroVersionType, + update_container, + remove_container, + discover_inventory_actions, +) from openpype.modules import ModulesManager from openpype.tools.utils.lib import ( get_progress_for_repre, @@ -74,7 +80,7 @@ class SceneInventoryView(QtWidgets.QTreeView): repre_ids = [] for item in items: - item_id = io.ObjectId(item["representation"]) + item_id = ObjectId(item["representation"]) if item_id not in repre_ids: repre_ids.append(item_id) @@ -141,7 +147,7 @@ class SceneInventoryView(QtWidgets.QTreeView): def _on_switch_to_versioned(items): repre_ids = [] for item in items: - item_id = io.ObjectId(item["representation"]) + item_id = ObjectId(item["representation"]) if item_id not in repre_ids: repre_ids.append(item_id) @@ -191,12 +197,12 @@ class SceneInventoryView(QtWidgets.QTreeView): version_doc["name"] for item in items: - repre_id = io.ObjectId(item["representation"]) + repre_id = ObjectId(item["representation"]) version_id = version_id_by_repre_id.get(repre_id) version_name = version_name_by_id.get(version_id) if version_name is not None: try: - api.update(item, version_name) + update_container(item, version_name) except AssertionError: self._show_version_error_dialog( version_name, [item] @@ -224,7 +230,7 @@ class SceneInventoryView(QtWidgets.QTreeView): def _on_update_to_latest(items): for item in items: try: - api.update(item, -1) + update_container(item, -1) except AssertionError: self._show_version_error_dialog(None, [item]) log.warning("Update failed", 
exc_info=True) @@ -249,7 +255,7 @@ class SceneInventoryView(QtWidgets.QTreeView): def _on_update_to_hero(items): for item in items: try: - api.update(item, HeroVersionType(-1)) + update_container(item, HeroVersionType(-1)) except AssertionError: self._show_version_error_dialog('hero', [item]) log.warning("Update failed", exc_info=True) @@ -483,7 +489,7 @@ class SceneInventoryView(QtWidgets.QTreeView): containers = containers or [dict()] # Check which action will be available in the menu - Plugins = api.discover(api.InventoryAction) + Plugins = discover_inventory_actions() compatible = [p() for p in Plugins if any(p.is_compatible(c) for c in containers)] @@ -654,7 +660,7 @@ class SceneInventoryView(QtWidgets.QTreeView): active = items[-1] # Get available versions for active representation - representation_id = io.ObjectId(active["representation"]) + representation_id = ObjectId(active["representation"]) representation = io.find_one({"_id": representation_id}) version = io.find_one({ "_id": representation["parent"] @@ -728,7 +734,7 @@ class SceneInventoryView(QtWidgets.QTreeView): version = versions_by_label[label] for item in items: try: - api.update(item, version) + update_container(item, version) except AssertionError: self._show_version_error_dialog(version, [item]) log.warning("Update failed", exc_info=True) @@ -759,7 +765,7 @@ class SceneInventoryView(QtWidgets.QTreeView): return for item in items: - api.remove(item) + remove_container(item) self.data_changed.emit() def _show_version_error_dialog(self, version, items): @@ -829,7 +835,7 @@ class SceneInventoryView(QtWidgets.QTreeView): # Trigger update to latest for item in outdated_items: try: - api.update(item, -1) + update_container(item, -1) except AssertionError: self._show_version_error_dialog(None, [item]) log.warning("Update failed", exc_info=True) diff --git a/openpype/tools/sceneinventory/window.py b/openpype/tools/sceneinventory/window.py index 83e4435015..b40fbb69e4 100644 --- 
a/openpype/tools/sceneinventory/window.py +++ b/openpype/tools/sceneinventory/window.py @@ -61,7 +61,7 @@ class SceneInventoryWindow(QtWidgets.QDialog): icon = qtawesome.icon("fa.refresh", color="white") refresh_button = QtWidgets.QPushButton(self) - update_all_button.setToolTip("Refresh") + refresh_button.setToolTip("Refresh") refresh_button.setIcon(icon) control_layout = QtWidgets.QHBoxLayout() diff --git a/openpype/tools/settings/settings/base.py b/openpype/tools/settings/settings/base.py index 706e2fdcf0..bd48b3a966 100644 --- a/openpype/tools/settings/settings/base.py +++ b/openpype/tools/settings/settings/base.py @@ -30,6 +30,9 @@ class BaseWidget(QtWidgets.QWidget): if not self.entity.gui_type: self.entity.on_change_callbacks.append(self._on_entity_change) + if self.entity.tooltip: + self.setToolTip(self.entity.tooltip) + self.label_widget = None self.create_ui() diff --git a/openpype/tools/settings/settings/wrapper_widgets.py b/openpype/tools/settings/settings/wrapper_widgets.py index 7370fcf945..b14a226912 100644 --- a/openpype/tools/settings/settings/wrapper_widgets.py +++ b/openpype/tools/settings/settings/wrapper_widgets.py @@ -92,7 +92,8 @@ class CollapsibleWrapper(WrapperWidget): self.content_layout = content_layout if self.collapsible: - body_widget.toggle_content(self.collapsed) + if not self.collapsed: + body_widget.toggle_content() else: body_widget.hide_toolbox(hide_content=False) diff --git a/openpype/tools/standalonepublish/widgets/model_asset.py b/openpype/tools/standalonepublish/widgets/model_asset.py index a7316a2aa7..02e9073555 100644 --- a/openpype/tools/standalonepublish/widgets/model_asset.py +++ b/openpype/tools/standalonepublish/widgets/model_asset.py @@ -35,7 +35,7 @@ def _iter_model_rows(model, class AssetModel(TreeModel): - """A model listing assets in the silo in the active project. + """A model listing assets in the active project. 
The assets are displayed in a treeview, they are visually parented by a `visualParent` field in the database containing an `_id` to a parent @@ -64,7 +64,7 @@ class AssetModel(TreeModel): self.refresh() - def _add_hierarchy(self, assets, parent=None, silos=None): + def _add_hierarchy(self, assets, parent=None): """Add the assets that are related to the parent as children items. This method does *not* query the database. These instead are queried @@ -72,27 +72,8 @@ class AssetModel(TreeModel): queries. Resulting in up to 10x speed increase. Args: - assets (dict): All assets in the currently active silo stored - by key/value - - Returns: - None - + assets (dict): All assets from current project. """ - if silos: - # WARNING: Silo item "_id" is set to silo value - # mainly because GUI issue with preserve selection and expanded row - # and because of easier hierarchy parenting (in "assets") - for silo in silos: - node = Node({ - "_id": silo, - "name": silo, - "label": silo, - "type": "silo" - }) - self.add_child(node, parent=parent) - self._add_hierarchy(assets, parent=node) - parent_id = parent["_id"] if parent else None current_assets = assets.get(parent_id, list()) @@ -132,27 +113,19 @@ class AssetModel(TreeModel): self.beginResetModel() - # Get all assets in current silo sorted by name + # Get all assets in current project sorted by name db_assets = self.dbcon.find({"type": "asset"}).sort("name", 1) - silos = db_assets.distinct("silo") or None - # if any silo is set to None then it's expected it should not be used - if silos and None in silos: - silos = None # Group the assets by their visual parent's id assets_by_parent = collections.defaultdict(list) for asset in db_assets: - parent_id = ( - asset.get("data", {}).get("visualParent") or - asset.get("silo") - ) + parent_id = asset.get("data", {}).get("visualParent") assets_by_parent[parent_id].append(asset) # Build the hierarchical tree items recursively self._add_hierarchy( assets_by_parent, - parent=None, - 
silos=silos + parent=None ) self.endResetModel() @@ -174,8 +147,6 @@ class AssetModel(TreeModel): # Allow a custom icon and custom icon color to be defined data = node.get("_document", {}).get("data", {}) icon = data.get("icon", None) - if icon is None and node.get("type") == "silo": - icon = "database" color = data.get("color", self._default_asset_icon_color) if icon is None: diff --git a/openpype/tools/standalonepublish/widgets/widget_asset.py b/openpype/tools/standalonepublish/widgets/widget_asset.py index e6b74f8f82..8b43cd7cf8 100644 --- a/openpype/tools/standalonepublish/widgets/widget_asset.py +++ b/openpype/tools/standalonepublish/widgets/widget_asset.py @@ -229,7 +229,6 @@ class AssetWidget(QtWidgets.QWidget): data = { 'project': project['name'], 'asset': asset['name'], - 'silo': asset.get("silo"), 'parents': self.get_parents(asset), 'task': task } diff --git a/openpype/tools/texture_copy/app.py b/openpype/tools/texture_copy/app.py index ceca98a082..0c3c260e51 100644 --- a/openpype/tools/texture_copy/app.py +++ b/openpype/tools/texture_copy/app.py @@ -57,7 +57,6 @@ class TextureCopy: "name": project_name, "code": project['data']['code'] }, - "silo": asset.get('silo'), "asset": asset['name'], "family": 'texture', "subset": 'Main', @@ -155,7 +154,6 @@ def texture_copy(asset, project, path): t.echo(">>> Initializing avalon session ...") os.environ["AVALON_PROJECT"] = project os.environ["AVALON_ASSET"] = asset - os.environ["AVALON_SILO"] = "" TextureCopy().process(asset, project, path) diff --git a/openpype/tools/utils/delegates.py b/openpype/tools/utils/delegates.py index 4ec6079bb7..71f817a1d7 100644 --- a/openpype/tools/utils/delegates.py +++ b/openpype/tools/utils/delegates.py @@ -6,7 +6,7 @@ import numbers import Qt from Qt import QtWidgets, QtGui, QtCore -from avalon.lib import HeroVersionType +from openpype.pipeline import HeroVersionType from .models import TreeModel from . 
import lib @@ -287,9 +287,5 @@ class PrettyTimeDelegate(QtWidgets.QStyledItemDelegate): """ def displayText(self, value, locale): - - if value is None: - # Ignore None value - return - - return pretty_timestamp(value) + if value is not None: + return pretty_timestamp(value) diff --git a/openpype/tools/workfiles/__init__.py b/openpype/tools/workfiles/__init__.py index cde7293931..5fbc71797d 100644 --- a/openpype/tools/workfiles/__init__.py +++ b/openpype/tools/workfiles/__init__.py @@ -1,9 +1,12 @@ +from .window import Window from .app import ( show, - Window + validate_host_requirements, ) __all__ = [ + "Window", + "show", - "Window" + "validate_host_requirements", ] diff --git a/openpype/tools/workfiles/app.py b/openpype/tools/workfiles/app.py index 63958ac57b..f0e7900cf5 100644 --- a/openpype/tools/workfiles/app.py +++ b/openpype/tools/workfiles/app.py @@ -1,40 +1,10 @@ import sys -import os -import re -import copy -import getpass -import shutil import logging -import datetime -import Qt -from Qt import QtWidgets, QtCore -from avalon import io, api +from avalon import api -from openpype import style -from openpype.tools.utils.lib import ( - qt_app_context -) -from openpype.tools.utils import PlaceholderLineEdit -from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget -from openpype.tools.utils.tasks_widget import TasksWidget -from openpype.tools.utils.delegates import PrettyTimeDelegate -from openpype.lib import ( - emit_event, - Anatomy, - get_workfile_doc, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_template_key, - create_workdir_extra_folders, - get_system_general_anatomy_data -) -from openpype.lib.avalon_context import ( - update_current_task, - compute_session_changes -) -from .model import FilesModel -from .view import FilesView +from openpype.tools.utils import qt_app_context +from .window import Window log = logging.getLogger(__name__) @@ -42,1205 +12,6 @@ module = sys.modules[__name__] module.window = None -def 
build_workfile_data(session): - """Get the data required for workfile formatting from avalon `session`""" - - # Set work file data for template formatting - asset_name = session["AVALON_ASSET"] - task_name = session["AVALON_TASK"] - project_doc = io.find_one( - {"type": "project"}, - { - "name": True, - "data.code": True, - "config.tasks": True, - } - ) - - asset_doc = io.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": True, - "data.parents": True - } - ) - - task_type = asset_doc["data"]["tasks"].get(task_name, {}).get("type") - - project_task_types = project_doc["config"]["tasks"] - task_short = project_task_types.get(task_type, {}).get("short_name") - - asset_parents = asset_doc["data"]["parents"] - parent_name = project_doc["name"] - if asset_parents: - parent_name = asset_parents[-1] - - data = { - "project": { - "name": project_doc["name"], - "code": project_doc["data"].get("code") - }, - "asset": asset_name, - "task": { - "name": task_name, - "type": task_type, - "short": task_short, - }, - "parent": parent_name, - "version": 1, - "user": getpass.getuser(), - "comment": "", - "ext": None - } - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - data.update(system_general_data) - - return data - - -class CommentMatcher(object): - """Use anatomy and work file data to parse comments from filenames""" - def __init__(self, anatomy, template_key, data): - - self.fname_regex = None - - template = anatomy.templates[template_key]["file"] - if "{comment}" not in template: - # Don't look for comment if template doesn't allow it - return - - # Create a regex group for extensions - extensions = api.registered_host().file_extensions() - any_extension = "(?:{})".format( - "|".join(re.escape(ext[1:]) for ext in extensions) - ) - - # Use placeholders that will never be in the filename - temp_data = copy.deepcopy(data) - temp_data["comment"] = "<>" - temp_data["version"] = "<>" - 
temp_data["ext"] = "<>" - - formatted = anatomy.format(temp_data) - fname_pattern = formatted[template_key]["file"] - fname_pattern = re.escape(fname_pattern) - - # Replace comment and version with something we can match with regex - replacements = { - "<>": "(.+)", - "<>": "[0-9]+", - "<>": any_extension, - } - for src, dest in replacements.items(): - fname_pattern = fname_pattern.replace(re.escape(src), dest) - - # Match from beginning to end of string to be safe - fname_pattern = "^{}$".format(fname_pattern) - - self.fname_regex = re.compile(fname_pattern) - - def parse_comment(self, filepath): - """Parse the {comment} part from a filename""" - if not self.fname_regex: - return - - fname = os.path.basename(filepath) - match = self.fname_regex.match(fname) - if match: - return match.group(1) - - -class SubversionLineEdit(QtWidgets.QWidget): - """QLineEdit with QPushButton for drop down selection of list of strings""" - def __init__(self, parent=None): - super(SubversionLineEdit, self).__init__(parent=parent) - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.setSpacing(3) - - self._input = PlaceholderLineEdit() - self._button = QtWidgets.QPushButton("") - self._button.setFixedWidth(18) - self._menu = QtWidgets.QMenu(self) - self._button.setMenu(self._menu) - - layout.addWidget(self._input) - layout.addWidget(self._button) - - @property - def input(self): - return self._input - - def set_values(self, values): - self._update(values) - - def _on_button_clicked(self): - self._menu.exec_() - - def _on_action_clicked(self, action): - self._input.setText(action.text()) - - def _update(self, values): - """Create optional predefined subset names - - Args: - default_names(list): all predefined names - - Returns: - None - """ - - menu = self._menu - button = self._button - - state = any(values) - button.setEnabled(state) - if state is False: - return - - # Include an empty string - values = [""] + sorted(values) - - # Get and destroy 
the action group - group = button.findChild(QtWidgets.QActionGroup) - if group: - group.deleteLater() - - # Build new action group - group = QtWidgets.QActionGroup(button) - for name in values: - action = group.addAction(name) - menu.addAction(action) - - group.triggered.connect(self._on_action_clicked) - - -class NameWindow(QtWidgets.QDialog): - """Name Window to define a unique filename inside a root folder - - The filename will be based on the "workfile" template defined in the - project["config"]["template"]. - - """ - - def __init__(self, parent, root, anatomy, template_key, session=None): - super(NameWindow, self).__init__(parent=parent) - self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) - - self.result = None - self.host = api.registered_host() - self.root = root - self.work_file = None - - if not session: - # Fallback to active session - session = api.Session - - self.data = build_workfile_data(session) - - # Store project anatomy - self.anatomy = anatomy - self.template = anatomy.templates[template_key]["file"] - self.template_key = template_key - - # Btns widget - btns_widget = QtWidgets.QWidget(self) - - btn_ok = QtWidgets.QPushButton("Ok", btns_widget) - btn_cancel = QtWidgets.QPushButton("Cancel", btns_widget) - - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - btns_layout.addWidget(btn_ok) - btns_layout.addWidget(btn_cancel) - - # Inputs widget - inputs_widget = QtWidgets.QWidget(self) - - # Version widget - version_widget = QtWidgets.QWidget(inputs_widget) - - # Version number input - version_input = QtWidgets.QSpinBox(version_widget) - version_input.setMinimum(1) - version_input.setMaximum(9999) - - # Last version checkbox - last_version_check = QtWidgets.QCheckBox( - "Next Available Version", version_widget - ) - last_version_check.setChecked(True) - - version_layout = QtWidgets.QHBoxLayout(version_widget) - version_layout.setContentsMargins(0, 0, 0, 0) - version_layout.addWidget(version_input) - 
version_layout.addWidget(last_version_check) - - # Preview widget - preview_label = QtWidgets.QLabel("Preview filename", inputs_widget) - - # Subversion input - subversion = SubversionLineEdit(inputs_widget) - subversion.input.setPlaceholderText("Will be part of filename.") - - # Extensions combobox - ext_combo = QtWidgets.QComboBox(inputs_widget) - # Add styled delegate to use stylesheets - ext_delegate = QtWidgets.QStyledItemDelegate() - ext_combo.setItemDelegate(ext_delegate) - ext_combo.addItems(self.host.file_extensions()) - - # Build inputs - inputs_layout = QtWidgets.QFormLayout(inputs_widget) - # Add version only if template contains version key - # - since the version can be padded with "{version:0>4}" we only search - # for "{version". - if "{version" in self.template: - inputs_layout.addRow("Version:", version_widget) - else: - version_widget.setVisible(False) - - # Add subversion only if template contains `{comment}` - if "{comment}" in self.template: - inputs_layout.addRow("Subversion:", subversion) - - # Detect whether a {comment} is in the current filename - if so, - # preserve it by default and set it in the comment/subversion field - current_filepath = self.host.current_file() - if current_filepath: - # We match the current filename against the current session - # instead of the session where the user is saving to. 
- current_data = build_workfile_data(api.Session) - matcher = CommentMatcher(anatomy, template_key, current_data) - comment = matcher.parse_comment(current_filepath) - if comment: - log.info("Detected subversion comment: {}".format(comment)) - self.data["comment"] = comment - subversion.input.setText(comment) - - existing_comments = self.get_existing_comments() - subversion.set_values(existing_comments) - - else: - subversion.setVisible(False) - inputs_layout.addRow("Extension:", ext_combo) - inputs_layout.addRow("Preview:", preview_label) - - # Build layout - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.addWidget(inputs_widget) - main_layout.addWidget(btns_widget) - - # Signal callback registration - version_input.valueChanged.connect(self.on_version_spinbox_changed) - last_version_check.stateChanged.connect( - self.on_version_checkbox_changed - ) - - subversion.input.textChanged.connect(self.on_comment_changed) - ext_combo.currentIndexChanged.connect(self.on_extension_changed) - - btn_ok.pressed.connect(self.on_ok_pressed) - btn_cancel.pressed.connect(self.on_cancel_pressed) - - # Allow "Enter" key to accept the save. - btn_ok.setDefault(True) - - # Force default focus to comment, some hosts didn't automatically - # apply focus to this line edit (e.g. 
Houdini) - subversion.input.setFocus() - - # Store widgets - self.btn_ok = btn_ok - - self.version_widget = version_widget - - self.version_input = version_input - self.last_version_check = last_version_check - - self.preview_label = preview_label - self.subversion = subversion - self.ext_combo = ext_combo - self._ext_delegate = ext_delegate - - self.refresh() - - def get_existing_comments(self): - - matcher = CommentMatcher(self.anatomy, self.template_key, self.data) - host_extensions = set(self.host.file_extensions()) - comments = set() - if os.path.isdir(self.root): - for fname in os.listdir(self.root): - if not os.path.isfile(os.path.join(self.root, fname)): - continue - - ext = os.path.splitext(fname)[-1] - if ext not in host_extensions: - continue - - comment = matcher.parse_comment(fname) - if comment: - comments.add(comment) - - return list(comments) - - def on_version_spinbox_changed(self, value): - self.data["version"] = value - self.refresh() - - def on_version_checkbox_changed(self, _value): - self.refresh() - - def on_comment_changed(self, text): - self.data["comment"] = text - self.refresh() - - def on_extension_changed(self): - ext = self.ext_combo.currentText() - if ext == self.data["ext"]: - return - self.data["ext"] = ext - self.refresh() - - def on_ok_pressed(self): - self.result = self.work_file - self.close() - - def on_cancel_pressed(self): - self.close() - - def get_result(self): - return self.result - - def get_work_file(self): - data = copy.deepcopy(self.data) - if not data["comment"]: - data.pop("comment", None) - - data["ext"] = data["ext"][1:] - - anatomy_filled = self.anatomy.format(data) - return anatomy_filled[self.template_key]["file"] - - def refresh(self): - extensions = self.host.file_extensions() - extension = self.data["ext"] - if extension is None: - # Define saving file extension - current_file = self.host.current_file() - if current_file: - # Match the extension of current file - _, extension = os.path.splitext(current_file) 
- else: - extension = extensions[0] - - if extension != self.data["ext"]: - self.data["ext"] = extension - index = self.ext_combo.findText( - extension, QtCore.Qt.MatchFixedString - ) - if index >= 0: - self.ext_combo.setCurrentIndex(index) - - if not self.last_version_check.isChecked(): - self.version_input.setEnabled(True) - self.data["version"] = self.version_input.value() - - work_file = self.get_work_file() - - else: - self.version_input.setEnabled(False) - - data = copy.deepcopy(self.data) - template = str(self.template) - - if not data["comment"]: - data.pop("comment", None) - - data["ext"] = data["ext"][1:] - - version = api.last_workfile_with_version( - self.root, template, data, extensions - )[1] - - if version is None: - version = 1 - else: - version += 1 - - found_valid_version = False - # Check if next version is valid version and give a chance to try - # next 100 versions - for idx in range(100): - # Store version to data - self.data["version"] = version - - work_file = self.get_work_file() - # Safety check - path = os.path.join(self.root, work_file) - if not os.path.exists(path): - found_valid_version = True - break - - # Try next version - version += 1 - # Log warning - if idx == 0: - log.warning(( - "BUG: Function `last_workfile_with_version` " - "didn't return last version." - )) - # Raise exception if even 100 version fallback didn't help - if not found_valid_version: - raise AssertionError( - "This is a bug. Couldn't find valid version!" - ) - - self.work_file = work_file - - path_exists = os.path.exists(os.path.join(self.root, work_file)) - - self.btn_ok.setEnabled(not path_exists) - - if path_exists: - self.preview_label.setText( - "Cannot create \"{0}\" because file exists!" 
- "".format(work_file) - ) - else: - self.preview_label.setText( - "{0}".format(work_file) - ) - - -class FilesWidget(QtWidgets.QWidget): - """A widget displaying files that allows to save and open files.""" - file_selected = QtCore.Signal(str) - workfile_created = QtCore.Signal(str) - file_opened = QtCore.Signal() - - def __init__(self, parent=None): - super(FilesWidget, self).__init__(parent=parent) - - # Setup - self._asset_id = None - self._asset_doc = None - self._task_name = None - self._task_type = None - - # Pype's anatomy object for current project - self.anatomy = Anatomy(io.Session["AVALON_PROJECT"]) - # Template key used to get work template from anatomy templates - self.template_key = "work" - - # This is not root but workfile directory - self._workfiles_root = None - self._workdir_path = None - self.host = api.registered_host() - - # Whether to automatically select the latest modified - # file on a refresh of the files model. - self.auto_select_latest_modified = True - - # Avoid crash in Blender and store the message box - # (setting parent doesn't work as it hides the message box) - self._messagebox = None - - files_view = FilesView(self) - - # Create the Files model - extensions = set(self.host.file_extensions()) - files_model = FilesModel(file_extensions=extensions) - - # Create proxy model for files to be able sort and filter - proxy_model = QtCore.QSortFilterProxyModel() - proxy_model.setSourceModel(files_model) - proxy_model.setDynamicSortFilter(True) - proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) - - # Set up the file list tree view - files_view.setModel(proxy_model) - files_view.setSortingEnabled(True) - files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - - # Date modified delegate - time_delegate = PrettyTimeDelegate() - files_view.setItemDelegateForColumn(1, time_delegate) - files_view.setIndentation(3) # smaller indentation - - # Default to a wider first filename column it is what we mostly care - # about and the 
date modified is relatively small anyway. - files_view.setColumnWidth(0, 330) - - # Filtering input - filter_input = PlaceholderLineEdit(self) - filter_input.setPlaceholderText("Filter files..") - filter_input.textChanged.connect(proxy_model.setFilterFixedString) - - # Home Page - # Build buttons widget for files widget - btns_widget = QtWidgets.QWidget(self) - btn_save = QtWidgets.QPushButton("Save As", btns_widget) - btn_browse = QtWidgets.QPushButton("Browse", btns_widget) - btn_open = QtWidgets.QPushButton("Open", btns_widget) - - btns_layout = QtWidgets.QHBoxLayout(btns_widget) - btns_layout.setContentsMargins(0, 0, 0, 0) - btns_layout.addWidget(btn_open) - btns_layout.addWidget(btn_browse) - btns_layout.addWidget(btn_save) - - # Build files widgets for home page - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.addWidget(filter_input) - main_layout.addWidget(files_view) - main_layout.addWidget(btns_widget) - - # Register signal callbacks - files_view.doubleClickedLeft.connect(self.on_open_pressed) - files_view.customContextMenuRequested.connect(self.on_context_menu) - files_view.selectionModel().selectionChanged.connect( - self.on_file_select - ) - - btn_open.pressed.connect(self.on_open_pressed) - btn_browse.pressed.connect(self.on_browse_pressed) - btn_save.pressed.connect(self.on_save_as_pressed) - - # Store attributes - self.time_delegate = time_delegate - - self.filter_input = filter_input - - self.files_view = files_view - self.files_model = files_model - - self.btns_widget = btns_widget - self.btn_open = btn_open - self.btn_browse = btn_browse - self.btn_save = btn_save - - def set_asset_task(self, asset_id, task_name, task_type): - if asset_id != self._asset_id: - self._asset_doc = None - self._asset_id = asset_id - self._task_name = task_name - self._task_type = task_type - - # Define a custom session so we can query the work root - # for a "Work area" that is not our current Session. 
- # This way we can browse it even before we enter it. - if self._asset_id and self._task_name and self._task_type: - session = self._get_session() - self._workdir_path = session["AVALON_WORKDIR"] - self._workfiles_root = self.host.work_root(session) - self.files_model.set_root(self._workfiles_root) - - else: - self.files_model.set_root(None) - - # Disable/Enable buttons based on available files in model - has_filenames = self.files_model.has_filenames() - self.btn_browse.setEnabled(has_filenames) - self.btn_open.setEnabled(has_filenames) - if not has_filenames: - # Manually trigger file selection - self.on_file_select() - - def _get_asset_doc(self): - if self._asset_id is None: - return None - - if self._asset_doc is None: - self._asset_doc = io.find_one({"_id": self._asset_id}) - return self._asset_doc - - def _get_session(self): - """Return a modified session for the current asset and task""" - - session = api.Session.copy() - self.template_key = get_workfile_template_key( - self._task_type, - session["AVALON_APP"], - project_name=session["AVALON_PROJECT"] - ) - changes = compute_session_changes( - session, - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - session.update(changes) - - return session - - def _enter_session(self): - """Enter the asset and task session currently selected""" - - session = api.Session.copy() - changes = compute_session_changes( - session, - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - if not changes: - # Return early if we're already in the right Session context - # to avoid any unwanted Task Changed callbacks to be triggered. 
- return - - update_current_task( - asset=self._get_asset_doc(), - task=self._task_name, - template_key=self.template_key - ) - - def open_file(self, filepath): - host = self.host - if host.has_unsaved_changes(): - result = self.save_changes_prompt() - if result is None: - # Cancel operation - return False - - # Save first if has changes - if result: - current_file = host.current_file() - if not current_file: - # If the user requested to save the current scene - # we can't actually automatically do so if the current - # file has not been saved with a name yet. So we'll have - # to opt out. - log.error("Can't save scene with no filename. Please " - "first save your work file using 'Save As'.") - return - - # Save current scene, continue to open file - host.save_file(current_file) - - self._enter_session() - host.open_file(filepath) - self.file_opened.emit() - - def save_changes_prompt(self): - self._messagebox = messagebox = QtWidgets.QMessageBox(parent=self) - messagebox.setWindowFlags(messagebox.windowFlags() | - QtCore.Qt.FramelessWindowHint) - messagebox.setIcon(messagebox.Warning) - messagebox.setWindowTitle("Unsaved Changes!") - messagebox.setText( - "There are unsaved changes to the current file." - "\nDo you want to save the changes?" - ) - messagebox.setStandardButtons( - messagebox.Yes | messagebox.No | messagebox.Cancel - ) - - result = messagebox.exec_() - if result == messagebox.Yes: - return True - if result == messagebox.No: - return False - return None - - def get_filename(self): - """Show save dialog to define filename for save or duplicate - - Returns: - str: The filename to create. 
- - """ - session = self._get_session() - - window = NameWindow( - parent=self, - root=self._workfiles_root, - anatomy=self.anatomy, - template_key=self.template_key, - session=session - ) - window.exec_() - - return window.get_result() - - def on_duplicate_pressed(self): - work_file = self.get_filename() - if not work_file: - return - - src = self._get_selected_filepath() - dst = os.path.join(self._workfiles_root, work_file) - shutil.copy(src, dst) - - self.workfile_created.emit(dst) - - self.refresh() - - def _get_selected_filepath(self): - """Return current filepath selected in view""" - selection = self.files_view.selectionModel() - index = selection.currentIndex() - if not index.isValid(): - return - - return index.data(self.files_model.FilePathRole) - - def on_open_pressed(self): - path = self._get_selected_filepath() - if not path: - print("No file selected to open..") - return - - self.open_file(path) - - def on_browse_pressed(self): - ext_filter = "Work File (*{0})".format( - " *".join(self.host.file_extensions()) - ) - kwargs = { - "caption": "Work Files", - "filter": ext_filter - } - if Qt.__binding__ in ("PySide", "PySide2"): - kwargs["dir"] = self._workfiles_root - else: - kwargs["directory"] = self._workfiles_root - - work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0] - if work_file: - self.open_file(work_file) - - def on_save_as_pressed(self): - work_filename = self.get_filename() - if not work_filename: - return - - # Trigger before save event - emit_event( - "workfile.save.before", - {"filename": work_filename, "workdir_path": self._workdir_path}, - source="workfiles.tool" - ) - - # Make sure workfiles root is updated - # - this triggers 'workio.work_root(...)' which may change value of - # '_workfiles_root' - self.set_asset_task( - self._asset_id, self._task_name, self._task_type - ) - - # Create workfiles root folder - if not os.path.exists(self._workfiles_root): - log.debug("Initializing Work Directory: %s", self._workfiles_root) - 
os.makedirs(self._workfiles_root) - - # Update session if context has changed - self._enter_session() - # Prepare full path to workfile and save it - filepath = os.path.join( - os.path.normpath(self._workfiles_root), work_filename - ) - self.host.save_file(filepath) - # Create extra folders - create_workdir_extra_folders( - self._workdir_path, - api.Session["AVALON_APP"], - self._task_type, - self._task_name, - api.Session["AVALON_PROJECT"] - ) - # Trigger after save events - emit_event( - "workfile.save.after", - {"filename": work_filename, "workdir_path": self._workdir_path}, - source="workfiles.tool" - ) - - self.workfile_created.emit(filepath) - # Refresh files model - self.refresh() - - def on_file_select(self): - self.file_selected.emit(self._get_selected_filepath()) - - def refresh(self): - """Refresh listed files for current selection in the interface""" - self.files_model.refresh() - - if self.auto_select_latest_modified: - self._select_last_modified_file() - - def on_context_menu(self, point): - index = self.files_view.indexAt(point) - if not index.isValid(): - return - - is_enabled = index.data(FilesModel.IsEnabled) - if not is_enabled: - return - - menu = QtWidgets.QMenu(self) - - # Duplicate - action = QtWidgets.QAction("Duplicate", menu) - tip = "Duplicate selected file." 
- action.setToolTip(tip) - action.setStatusTip(tip) - action.triggered.connect(self.on_duplicate_pressed) - menu.addAction(action) - - # Show the context action menu - global_point = self.files_view.mapToGlobal(point) - action = menu.exec_(global_point) - if not action: - return - - def _select_last_modified_file(self): - """Utility function to select the file with latest date modified""" - role = self.files_model.DateModifiedRole - model = self.files_view.model() - - highest_index = None - highest = 0 - for row in range(model.rowCount()): - index = model.index(row, 0, parent=QtCore.QModelIndex()) - if not index.isValid(): - continue - - modified = index.data(role) - if modified is not None and modified > highest: - highest_index = index - highest = modified - - if highest_index: - self.files_view.setCurrentIndex(highest_index) - - -class SidePanelWidget(QtWidgets.QWidget): - save_clicked = QtCore.Signal() - - def __init__(self, parent=None): - super(SidePanelWidget, self).__init__(parent) - - details_label = QtWidgets.QLabel("Details", self) - details_input = QtWidgets.QPlainTextEdit(self) - details_input.setReadOnly(True) - - note_label = QtWidgets.QLabel("Artist note", self) - note_input = QtWidgets.QPlainTextEdit(self) - btn_note_save = QtWidgets.QPushButton("Save note", self) - - main_layout = QtWidgets.QVBoxLayout(self) - main_layout.setContentsMargins(0, 0, 0, 0) - main_layout.addWidget(details_label, 0) - main_layout.addWidget(details_input, 0) - main_layout.addWidget(note_label, 0) - main_layout.addWidget(note_input, 1) - main_layout.addWidget(btn_note_save, alignment=QtCore.Qt.AlignRight) - - note_input.textChanged.connect(self.on_note_change) - btn_note_save.clicked.connect(self.on_save_click) - - self.details_input = details_input - self.note_input = note_input - self.btn_note_save = btn_note_save - - self._orig_note = "" - self._workfile_doc = None - - def on_note_change(self): - text = self.note_input.toPlainText() - 
self.btn_note_save.setEnabled(self._orig_note != text) - - def on_save_click(self): - self._orig_note = self.note_input.toPlainText() - self.on_note_change() - self.save_clicked.emit() - - def set_context(self, asset_id, task_name, filepath, workfile_doc): - # Check if asset, task and file are selected - # NOTE workfile document is not requirement - enabled = bool(asset_id) and bool(task_name) and bool(filepath) - - self.details_input.setEnabled(enabled) - self.note_input.setEnabled(enabled) - self.btn_note_save.setEnabled(enabled) - - # Make sure workfile doc is overridden - self._workfile_doc = workfile_doc - # Disable inputs and remove texts if any required arguments are missing - if not enabled: - self._orig_note = "" - self.details_input.setPlainText("") - self.note_input.setPlainText("") - return - - orig_note = "" - if workfile_doc: - orig_note = workfile_doc["data"].get("note") or orig_note - - self._orig_note = orig_note - self.note_input.setPlainText(orig_note) - # Set as empty string - self.details_input.setPlainText("") - - filestat = os.stat(filepath) - size_ending_mapping = { - "KB": 1024 ** 1, - "MB": 1024 ** 2, - "GB": 1024 ** 3 - } - size = filestat.st_size - ending = "B" - for _ending, _size in size_ending_mapping.items(): - if filestat.st_size < _size: - break - size = filestat.st_size / _size - ending = _ending - - # Append html string - datetime_format = "%b %d %Y %H:%M:%S" - creation_time = datetime.datetime.fromtimestamp(filestat.st_ctime) - modification_time = datetime.datetime.fromtimestamp(filestat.st_mtime) - lines = ( - "Size:", - "{:.2f} {}".format(size, ending), - "Created:", - creation_time.strftime(datetime_format), - "Modified:", - modification_time.strftime(datetime_format) - ) - self.details_input.appendHtml("
".join(lines)) - - def get_workfile_data(self): - data = { - "note": self.note_input.toPlainText() - } - return self._workfile_doc, data - - -class Window(QtWidgets.QMainWindow): - """Work Files Window""" - title = "Work Files" - - def __init__(self, parent=None): - super(Window, self).__init__(parent=parent) - self.setWindowTitle(self.title) - window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint - if not parent: - window_flags |= QtCore.Qt.WindowStaysOnTopHint - self.setWindowFlags(window_flags) - - # Create pages widget and set it as central widget - pages_widget = QtWidgets.QStackedWidget(self) - self.setCentralWidget(pages_widget) - - home_page_widget = QtWidgets.QWidget(pages_widget) - home_body_widget = QtWidgets.QWidget(home_page_widget) - - assets_widget = SingleSelectAssetsWidget(io, parent=home_body_widget) - assets_widget.set_current_asset_btn_visibility(True) - - tasks_widget = TasksWidget(io, home_body_widget) - files_widget = FilesWidget(home_body_widget) - side_panel = SidePanelWidget(home_body_widget) - - pages_widget.addWidget(home_page_widget) - - # Build home - home_page_layout = QtWidgets.QVBoxLayout(home_page_widget) - home_page_layout.addWidget(home_body_widget) - - # Build home - body - body_layout = QtWidgets.QVBoxLayout(home_body_widget) - split_widget = QtWidgets.QSplitter(home_body_widget) - split_widget.addWidget(assets_widget) - split_widget.addWidget(tasks_widget) - split_widget.addWidget(files_widget) - split_widget.addWidget(side_panel) - split_widget.setSizes([255, 160, 455, 175]) - - body_layout.addWidget(split_widget) - - # Add top margin for tasks to align it visually with files as - # the files widget has a filter field which tasks does not. 
- tasks_widget.setContentsMargins(0, 32, 0, 0) - - # Set context after asset widget is refreshed - # - to do so it is necessary to wait until refresh is done - set_context_timer = QtCore.QTimer() - set_context_timer.setInterval(100) - - # Connect signals - set_context_timer.timeout.connect(self._on_context_set_timeout) - assets_widget.selection_changed.connect(self._on_asset_changed) - tasks_widget.task_changed.connect(self._on_task_changed) - files_widget.file_selected.connect(self.on_file_select) - files_widget.workfile_created.connect(self.on_workfile_create) - files_widget.file_opened.connect(self._on_file_opened) - side_panel.save_clicked.connect(self.on_side_panel_save) - - self._set_context_timer = set_context_timer - self.home_page_widget = home_page_widget - self.pages_widget = pages_widget - self.home_body_widget = home_body_widget - self.split_widget = split_widget - - self.assets_widget = assets_widget - self.tasks_widget = tasks_widget - self.files_widget = files_widget - self.side_panel = side_panel - - # Force focus on the open button by default, required for Houdini. - files_widget.btn_open.setFocus() - - self.resize(1200, 600) - - self._first_show = True - self._context_to_set = None - - def showEvent(self, event): - super(Window, self).showEvent(event) - if self._first_show: - self._first_show = False - self.refresh() - self.setStyleSheet(style.load_stylesheet()) - - def keyPressEvent(self, event): - """Custom keyPressEvent. - - Override keyPressEvent to do nothing so that Maya's panels won't - take focus when pressing "SHIFT" whilst mouse is over viewport or - outliner. This way users don't accidentally perform Maya commands - whilst trying to name an instance. 
- - """ - - def set_save_enabled(self, enabled): - self.files_widget.btn_save.setEnabled(enabled) - - def on_file_select(self, filepath): - asset_id = self.assets_widget.get_selected_asset_id() - task_name = self.tasks_widget.get_selected_task_name() - - workfile_doc = None - if asset_id and task_name and filepath: - filename = os.path.split(filepath)[1] - workfile_doc = get_workfile_doc( - asset_id, task_name, filename, io - ) - self.side_panel.set_context( - asset_id, task_name, filepath, workfile_doc - ) - - def on_workfile_create(self, filepath): - self._create_workfile_doc(filepath) - - def _on_file_opened(self): - self.close() - - def on_side_panel_save(self): - workfile_doc, data = self.side_panel.get_workfile_data() - if not workfile_doc: - filepath = self.files_widget._get_selected_filepath() - self._create_workfile_doc(filepath, force=True) - workfile_doc = self._get_current_workfile_doc() - - save_workfile_data_to_doc(workfile_doc, data, io) - - def _get_current_workfile_doc(self, filepath=None): - if filepath is None: - filepath = self.files_widget._get_selected_filepath() - task_name = self.tasks_widget.get_selected_task_name() - asset_id = self.assets_widget.get_selected_asset_id() - if not task_name or not asset_id or not filepath: - return - - filename = os.path.split(filepath)[1] - return get_workfile_doc( - asset_id, task_name, filename, io - ) - - def _create_workfile_doc(self, filepath, force=False): - workfile_doc = None - if not force: - workfile_doc = self._get_current_workfile_doc(filepath) - - if not workfile_doc: - workdir, filename = os.path.split(filepath) - asset_id = self.assets_widget.get_selected_asset_id() - asset_doc = io.find_one({"_id": asset_id}) - task_name = self.tasks_widget.get_selected_task_name() - create_workfile_doc(asset_doc, task_name, filename, workdir, io) - - def refresh(self): - # Refresh asset widget - self.assets_widget.refresh() - - self._on_task_changed() - - def set_context(self, context): - 
self._context_to_set = context - self._set_context_timer.start() - - def _on_context_set_timeout(self): - if self._context_to_set is None: - self._set_context_timer.stop() - return - - if self.assets_widget.refreshing: - return - - self._context_to_set, context = None, self._context_to_set - if "asset" in context: - asset_doc = io.find_one( - { - "name": context["asset"], - "type": "asset" - }, - {"_id": 1} - ) or {} - asset_id = asset_doc.get("_id") - # Select the asset - self.assets_widget.select_asset(asset_id) - self.tasks_widget.set_asset_id(asset_id) - - if "task" in context: - self.tasks_widget.select_task_name(context["task"]) - self._on_task_changed() - - def _on_asset_changed(self): - asset_id = self.assets_widget.get_selected_asset_id() - if asset_id: - self.tasks_widget.setEnabled(True) - else: - # Force disable the other widgets if no - # active selection - self.tasks_widget.setEnabled(False) - self.files_widget.setEnabled(False) - - self.tasks_widget.set_asset_id(asset_id) - - def _on_task_changed(self): - asset_id = self.assets_widget.get_selected_asset_id() - task_name = self.tasks_widget.get_selected_task_name() - task_type = self.tasks_widget.get_selected_task_type() - - asset_is_valid = asset_id is not None - self.tasks_widget.setEnabled(asset_is_valid) - - self.files_widget.setEnabled(bool(task_name) and asset_is_valid) - self.files_widget.set_asset_task(asset_id, task_name, task_type) - self.files_widget.refresh() - - def validate_host_requirements(host): if host is None: raise RuntimeError("No registered host.") @@ -1290,7 +61,6 @@ def show(root=None, debug=False, parent=None, use_context=True, save=True): if use_context: context = { "asset": api.Session["AVALON_ASSET"], - "silo": api.Session["AVALON_SILO"], "task": api.Session["AVALON_TASK"] } window.set_context(context) diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py new file mode 100644 index 0000000000..d2b8a76952 --- /dev/null +++ 
b/openpype/tools/workfiles/files_widget.py @@ -0,0 +1,583 @@ +import os +import logging +import shutil + +import Qt +from Qt import QtWidgets, QtCore +from avalon import io, api + +from openpype.tools.utils import PlaceholderLineEdit +from openpype.tools.utils.delegates import PrettyTimeDelegate +from openpype.lib import ( + emit_event, + Anatomy, + get_workfile_template_key, + create_workdir_extra_folders, +) +from openpype.lib.avalon_context import ( + update_current_task, + compute_session_changes +) +from .model import ( + WorkAreaFilesModel, + PublishFilesModel, + + FILEPATH_ROLE, + DATE_MODIFIED_ROLE, +) +from .save_as_dialog import SaveAsDialog +from .lib import TempPublishFiles + +log = logging.getLogger(__name__) + + +class FilesView(QtWidgets.QTreeView): + doubleClickedLeft = QtCore.Signal() + doubleClickedRight = QtCore.Signal() + + def mouseDoubleClickEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self.doubleClickedLeft.emit() + + elif event.button() == QtCore.Qt.RightButton: + self.doubleClickedRight.emit() + + return super(FilesView, self).mouseDoubleClickEvent(event) + + +class FilesWidget(QtWidgets.QWidget): + """A widget displaying files that allows to save and open files.""" + file_selected = QtCore.Signal(str) + file_opened = QtCore.Signal() + publish_file_viewed = QtCore.Signal() + workfile_created = QtCore.Signal(str) + published_visible_changed = QtCore.Signal(bool) + + def __init__(self, parent): + super(FilesWidget, self).__init__(parent) + + # Setup + self._asset_id = None + self._asset_doc = None + self._task_name = None + self._task_type = None + + # Pype's anatomy object for current project + self.anatomy = Anatomy(io.Session["AVALON_PROJECT"]) + # Template key used to get work template from anatomy templates + self.template_key = "work" + + # This is not root but workfile directory + self._workfiles_root = None + self._workdir_path = None + self.host = api.registered_host() + temp_publish_files = TempPublishFiles() + 
temp_publish_files.cleanup() + self._temp_publish_files = temp_publish_files + + # Whether to automatically select the latest modified + # file on a refresh of the files model. + self.auto_select_latest_modified = True + + # Avoid crash in Blender and store the message box + # (setting parent doesn't work as it hides the message box) + self._messagebox = None + + # Filtering input + filter_widget = QtWidgets.QWidget(self) + + published_checkbox = QtWidgets.QCheckBox("Published", filter_widget) + + filter_input = PlaceholderLineEdit(filter_widget) + filter_input.setPlaceholderText("Filter files..") + + filter_layout = QtWidgets.QHBoxLayout(filter_widget) + filter_layout.setContentsMargins(0, 0, 0, 0) + filter_layout.addWidget(published_checkbox, 0) + filter_layout.addWidget(filter_input, 1) + + # Create the Files models + extensions = set(self.host.file_extensions()) + + views_widget = QtWidgets.QWidget(self) + # Workarea view + workarea_files_model = WorkAreaFilesModel(extensions) + + # Create proxy model for files to be able sort and filter + workarea_proxy_model = QtCore.QSortFilterProxyModel() + workarea_proxy_model.setSourceModel(workarea_files_model) + workarea_proxy_model.setDynamicSortFilter(True) + workarea_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + # Set up the file list tree view + workarea_files_view = FilesView(views_widget) + workarea_files_view.setModel(workarea_proxy_model) + workarea_files_view.setSortingEnabled(True) + workarea_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + # Date modified delegate + workarea_time_delegate = PrettyTimeDelegate() + workarea_files_view.setItemDelegateForColumn(1, workarea_time_delegate) + workarea_files_view.setIndentation(3) # smaller indentation + + # Default to a wider first filename column it is what we mostly care + # about and the date modified is relatively small anyway. 
+ workarea_files_view.setColumnWidth(0, 330) + + # Publish files view + publish_files_model = PublishFilesModel(extensions, io, self.anatomy) + + publish_proxy_model = QtCore.QSortFilterProxyModel() + publish_proxy_model.setSourceModel(publish_files_model) + publish_proxy_model.setDynamicSortFilter(True) + publish_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + publish_files_view = FilesView(views_widget) + publish_files_view.setModel(publish_proxy_model) + + publish_files_view.setSortingEnabled(True) + publish_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + + # Date modified delegate + publish_time_delegate = PrettyTimeDelegate() + publish_files_view.setItemDelegateForColumn(1, publish_time_delegate) + publish_files_view.setIndentation(3) # smaller indentation + + # Default to a wider first filename column it is what we mostly care + # about and the date modified is relatively small anyway. + publish_files_view.setColumnWidth(0, 330) + + views_layout = QtWidgets.QHBoxLayout(views_widget) + views_layout.setContentsMargins(0, 0, 0, 0) + views_layout.addWidget(workarea_files_view, 1) + views_layout.addWidget(publish_files_view, 1) + + # Home Page + # Build buttons widget for files widget + btns_widget = QtWidgets.QWidget(self) + btn_save = QtWidgets.QPushButton("Save As", btns_widget) + btn_browse = QtWidgets.QPushButton("Browse", btns_widget) + btn_open = QtWidgets.QPushButton("Open", btns_widget) + + btn_view_published = QtWidgets.QPushButton("View", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addWidget(btn_open, 1) + btns_layout.addWidget(btn_browse, 1) + btns_layout.addWidget(btn_save, 1) + btns_layout.addWidget(btn_view_published, 1) + + # Build files widgets for home page + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(filter_widget, 0) + main_layout.addWidget(views_widget, 1) + 
main_layout.addWidget(btns_widget, 0) + + # Register signal callbacks + published_checkbox.stateChanged.connect(self._on_published_change) + filter_input.textChanged.connect(self._on_filter_text_change) + + workarea_files_view.doubleClickedLeft.connect( + self._on_workarea_open_pressed + ) + workarea_files_view.customContextMenuRequested.connect( + self._on_workarea_context_menu + ) + workarea_files_view.selectionModel().selectionChanged.connect( + self.on_file_select + ) + publish_files_view.doubleClickedLeft.connect( + self._on_view_published_pressed + ) + + btn_open.pressed.connect(self._on_workarea_open_pressed) + btn_browse.pressed.connect(self.on_browse_pressed) + btn_save.pressed.connect(self.on_save_as_pressed) + btn_view_published.pressed.connect(self._on_view_published_pressed) + + # Store attributes + self._published_checkbox = published_checkbox + self._filter_input = filter_input + + self._workarea_time_delegate = workarea_time_delegate + self._workarea_files_view = workarea_files_view + self._workarea_files_model = workarea_files_model + self._workarea_proxy_model = workarea_proxy_model + + self._publish_time_delegate = publish_time_delegate + self._publish_files_view = publish_files_view + self._publish_files_model = publish_files_model + self._publish_proxy_model = publish_proxy_model + + self._btns_widget = btns_widget + self._btn_open = btn_open + self._btn_browse = btn_browse + self._btn_save = btn_save + self._btn_view_published = btn_view_published + + # Create a proxy widget for files widget + self.setFocusProxy(btn_open) + + # Hide publish files widgets + publish_files_view.setVisible(False) + btn_view_published.setVisible(False) + + @property + def published_enabled(self): + return self._published_checkbox.isChecked() + + def _on_published_change(self): + published_enabled = self.published_enabled + + self._workarea_files_view.setVisible(not published_enabled) + self._btn_open.setVisible(not published_enabled) + 
self._btn_browse.setVisible(not published_enabled) + self._btn_save.setVisible(not published_enabled) + + self._publish_files_view.setVisible(published_enabled) + self._btn_view_published.setVisible(published_enabled) + + self._update_filtering() + self._update_asset_task() + + self.published_visible_changed.emit(published_enabled) + + self._select_last_modified_file() + + def _on_filter_text_change(self): + self._update_filtering() + + def _update_filtering(self): + text = self._filter_input.text() + if self.published_enabled: + self._publish_proxy_model.setFilterFixedString(text) + else: + self._workarea_proxy_model.setFilterFixedString(text) + + def set_save_enabled(self, enabled): + self._btn_save.setEnabled(enabled) + + def set_asset_task(self, asset_id, task_name, task_type): + if asset_id != self._asset_id: + self._asset_doc = None + self._asset_id = asset_id + self._task_name = task_name + self._task_type = task_type + self._update_asset_task() + + def _update_asset_task(self): + if self.published_enabled: + self._publish_files_model.set_context( + self._asset_id, self._task_name + ) + has_valid_items = self._publish_files_model.has_valid_items() + self._btn_view_published.setEnabled(has_valid_items) + else: + # Define a custom session so we can query the work root + # for a "Work area" that is not our current Session. + # This way we can browse it even before we enter it. 
+ if self._asset_id and self._task_name and self._task_type: + session = self._get_session() + self._workdir_path = session["AVALON_WORKDIR"] + self._workfiles_root = self.host.work_root(session) + self._workarea_files_model.set_root(self._workfiles_root) + + else: + self._workarea_files_model.set_root(None) + + # Disable/Enable buttons based on available files in model + has_valid_items = self._workarea_files_model.has_valid_items() + self._btn_browse.setEnabled(has_valid_items) + self._btn_open.setEnabled(has_valid_items) + # Manually trigger file selection + if not has_valid_items: + self.on_file_select() + + def _get_asset_doc(self): + if self._asset_id is None: + return None + + if self._asset_doc is None: + self._asset_doc = io.find_one({"_id": self._asset_id}) + return self._asset_doc + + def _get_session(self): + """Return a modified session for the current asset and task""" + + session = api.Session.copy() + self.template_key = get_workfile_template_key( + self._task_type, + session["AVALON_APP"], + project_name=session["AVALON_PROJECT"] + ) + changes = compute_session_changes( + session, + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + session.update(changes) + + return session + + def _enter_session(self): + """Enter the asset and task session currently selected""" + + session = api.Session.copy() + changes = compute_session_changes( + session, + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + if not changes: + # Return early if we're already in the right Session context + # to avoid any unwanted Task Changed callbacks to be triggered. 
+ return + + update_current_task( + asset=self._get_asset_doc(), + task=self._task_name, + template_key=self.template_key + ) + + def open_file(self, filepath): + host = self.host + if host.has_unsaved_changes(): + result = self.save_changes_prompt() + if result is None: + # Cancel operation + return False + + # Save first if has changes + if result: + current_file = host.current_file() + if not current_file: + # If the user requested to save the current scene + # we can't actually automatically do so if the current + # file has not been saved with a name yet. So we'll have + # to opt out. + log.error("Can't save scene with no filename. Please " + "first save your work file using 'Save As'.") + return + + # Save current scene, continue to open file + host.save_file(current_file) + + self._enter_session() + host.open_file(filepath) + self.file_opened.emit() + + def save_changes_prompt(self): + self._messagebox = messagebox = QtWidgets.QMessageBox(parent=self) + messagebox.setWindowFlags(messagebox.windowFlags() | + QtCore.Qt.FramelessWindowHint) + messagebox.setIcon(messagebox.Warning) + messagebox.setWindowTitle("Unsaved Changes!") + messagebox.setText( + "There are unsaved changes to the current file." + "\nDo you want to save the changes?" + ) + messagebox.setStandardButtons( + messagebox.Yes | messagebox.No | messagebox.Cancel + ) + + result = messagebox.exec_() + if result == messagebox.Yes: + return True + if result == messagebox.No: + return False + return None + + def get_filename(self): + """Show save dialog to define filename for save or duplicate + + Returns: + str: The filename to create. 
+ + """ + session = self._get_session() + + window = SaveAsDialog( + parent=self, + root=self._workfiles_root, + anatomy=self.anatomy, + template_key=self.template_key, + session=session + ) + window.exec_() + + return window.get_result() + + def on_duplicate_pressed(self): + work_file = self.get_filename() + if not work_file: + return + + src = self._get_selected_filepath() + dst = os.path.join(self._workfiles_root, work_file) + shutil.copy(src, dst) + + self.workfile_created.emit(dst) + + self.refresh() + + def _get_selected_filepath(self): + """Return current filepath selected in view""" + if self.published_enabled: + source_view = self._publish_files_view + else: + source_view = self._workarea_files_view + selection = source_view.selectionModel() + index = selection.currentIndex() + if not index.isValid(): + return + + return index.data(FILEPATH_ROLE) + + def _on_workarea_open_pressed(self): + path = self._get_selected_filepath() + if not path: + print("No file selected to open..") + return + + self.open_file(path) + + def on_browse_pressed(self): + ext_filter = "Work File (*{0})".format( + " *".join(self.host.file_extensions()) + ) + kwargs = { + "caption": "Work Files", + "filter": ext_filter + } + if Qt.__binding__ in ("PySide", "PySide2"): + kwargs["dir"] = self._workfiles_root + else: + kwargs["directory"] = self._workfiles_root + + work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0] + if work_file: + self.open_file(work_file) + + def on_save_as_pressed(self): + work_filename = self.get_filename() + if not work_filename: + return + + # Trigger before save event + emit_event( + "workfile.save.before", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) + + # Make sure workfiles root is updated + # - this triggers 'workio.work_root(...)' which may change value of + # '_workfiles_root' + self.set_asset_task( + self._asset_id, self._task_name, self._task_type + ) + + # Create workfiles root folder + if 
not os.path.exists(self._workfiles_root): + log.debug("Initializing Work Directory: %s", self._workfiles_root) + os.makedirs(self._workfiles_root) + + # Update session if context has changed + self._enter_session() + # Prepare full path to workfile and save it + filepath = os.path.join( + os.path.normpath(self._workfiles_root), work_filename + ) + self.host.save_file(filepath) + # Create extra folders + create_workdir_extra_folders( + self._workdir_path, + api.Session["AVALON_APP"], + self._task_type, + self._task_name, + api.Session["AVALON_PROJECT"] + ) + # Trigger after save events + emit_event( + "workfile.save.after", + {"filename": work_filename, "workdir_path": self._workdir_path}, + source="workfiles.tool" + ) + + self.workfile_created.emit(filepath) + # Refresh files model + self.refresh() + + def _on_view_published_pressed(self): + filepath = self._get_selected_filepath() + if not filepath or not os.path.exists(filepath): + return + item = self._temp_publish_files.add_file(filepath) + self.host.open_file(item.filepath) + self.publish_file_viewed.emit() + # Change state back to workarea + self._published_checkbox.setChecked(False) + + def on_file_select(self): + self.file_selected.emit(self._get_selected_filepath()) + + def refresh(self): + """Refresh listed files for current selection in the interface""" + if self.published_enabled: + self._publish_files_model.refresh() + else: + self._workarea_files_model.refresh() + + if self.auto_select_latest_modified: + self._select_last_modified_file() + + def _on_workarea_context_menu(self, point): + index = self._workarea_files_view.indexAt(point) + if not index.isValid(): + return + + if not index.flags() & QtCore.Qt.ItemIsEnabled: + return + + menu = QtWidgets.QMenu(self) + + # Duplicate + action = QtWidgets.QAction("Duplicate", menu) + tip = "Duplicate selected file." 
+ action.setToolTip(tip) + action.setStatusTip(tip) + action.triggered.connect(self.on_duplicate_pressed) + menu.addAction(action) + + # Show the context action menu + global_point = self._workarea_files_view.mapToGlobal(point) + action = menu.exec_(global_point) + if not action: + return + + def _select_last_modified_file(self): + """Utility function to select the file with latest date modified""" + if self.published_enabled: + source_view = self._publish_files_view + else: + source_view = self._workarea_files_view + model = source_view.model() + + highest_index = None + highest = 0 + for row in range(model.rowCount()): + index = model.index(row, 0, parent=QtCore.QModelIndex()) + if not index.isValid(): + continue + + modified = index.data(DATE_MODIFIED_ROLE) + if modified is not None and modified > highest: + highest_index = index + highest = modified + + if highest_index: + source_view.setCurrentIndex(highest_index) diff --git a/openpype/tools/workfiles/lib.py b/openpype/tools/workfiles/lib.py new file mode 100644 index 0000000000..21a7485b7b --- /dev/null +++ b/openpype/tools/workfiles/lib.py @@ -0,0 +1,272 @@ +import os +import shutil +import uuid +import time +import json +import logging +import contextlib + +import appdirs + + +class TempPublishFilesItem(object): + """Object representing copied workfile in app temp folder. + + Args: + item_id (str): Id of item used as subfolder. + data (dict): Metadata about temp files. + directory (str): Path to directory where files are copied to. 
+ """ + + def __init__(self, item_id, data, directory): + self._id = item_id + self._directory = directory + self._filepath = os.path.join(directory, data["filename"]) + + @property + def directory(self): + return self._directory + + @property + def filepath(self): + return self._filepath + + @property + def id(self): + return self._id + + @property + def size(self): + if os.path.exists(self.filepath): + s = os.stat(self.filepath) + return s.st_size + return 0 + + +class TempPublishFiles(object): + """Directory where published workfiles are copied when opened. + + Directory is located in appdirs on the machine. Folder contains file + with metadata about stored files. Each item in metadata has id, filename + and expiration time. When expiration time is higher then current time the + item is removed from metadata and it's files are deleted. Files of items + are stored in subfolder named by item's id. + + Metadata file can be in theory opened and modified by multiple processes, + threads at one time. For those cases is created simple lock file which + is created before modification begins and is removed when modification + ends. Existence of the file means that it should not be modified by + any other process at the same time. + + Metadata example: + ``` + { + "96050b4a-8974-4fca-8179-7c446c478d54": { + "created": 1647880725.555, + "expiration": 1647884325.555, + "filename": "cg_pigeon_workfileModeling_v025.ma" + }, + ... + } + ``` + + ## Why is this needed + Combination of more issues. Temp files are not automatically removed by + OS on windows so using tempfiles in TEMP would lead to kill disk space of + machine. There are also cases when someone wants to open multiple files + in short period of time and want to manually remove those files so keeping + track of temporary copied files in pre-defined structure is needed. 
+ """ + minute_in_seconds = 60 + hour_in_seconds = 60 * minute_in_seconds + day_in_seconds = 24 * hour_in_seconds + + def __init__(self): + root_dir = appdirs.user_data_dir( + "published_workfiles_temp", "openpype" + ) + if not os.path.exists(root_dir): + os.makedirs(root_dir) + + metadata_path = os.path.join(root_dir, "metadata.json") + lock_path = os.path.join(root_dir, "lock.json") + + self._root_dir = root_dir + self._metadata_path = metadata_path + self._lock_path = lock_path + self._log = None + + @property + def log(self): + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + @property + def life_time(self): + """How long will be new item kept in temp in seconds. + + Returns: + int: Lifetime of temp item. + """ + return int(self.hour_in_seconds) + + @property + def size(self): + """File size of existing items.""" + size = 0 + for item in self.get_items(): + size += item.size + return size + + def add_file(self, src_path): + """Add workfile to temp directory. + + This will create new item and source path is copied to it's directory. + """ + filename = os.path.basename(src_path) + + item_id = str(uuid.uuid4()) + dst_dirpath = os.path.join(self._root_dir, item_id) + if not os.path.exists(dst_dirpath): + os.makedirs(dst_dirpath) + + dst_path = os.path.join(dst_dirpath, filename) + shutil.copy(src_path, dst_path) + + now = time.time() + item_data = { + "filename": filename, + "expiration": now + self.life_time, + "created": now + } + with self._modify_data() as data: + data[item_id] = item_data + + return TempPublishFilesItem(item_id, item_data, dst_dirpath) + + @contextlib.contextmanager + def _modify_data(self): + """Create lock file when data in metadata file are modified.""" + start_time = time.time() + timeout = 3 + while os.path.exists(self._lock_path): + time.sleep(0.01) + if start_time > timeout: + self.log.warning(( + "Waited for {} seconds to free lock file. Overriding lock." 
+ ).format(timeout)) + + with open(self._lock_path, "w") as stream: + json.dump({"pid": os.getpid()}, stream) + + try: + data = self._get_data() + yield data + with open(self._metadata_path, "w") as stream: + json.dump(data, stream) + + finally: + os.remove(self._lock_path) + + def _get_data(self): + output = {} + if not os.path.exists(self._metadata_path): + return output + + try: + with open(self._metadata_path, "r") as stream: + output = json.load(stream) + except Exception: + self.log.warning("Failed to read metadata file.", exc_info=True) + return output + + def cleanup(self, check_expiration=True): + """Cleanup files based on metadata. + + Items that passed expiration are removed when this is called. Or all + files are removed when `check_expiration` is set to False. + + Args: + check_expiration (bool): All items and files are removed when set + to True. + """ + data = self._get_data() + now = time.time() + remove_ids = set() + all_ids = set() + for item_id, item_data in data.items(): + all_ids.add(item_id) + if check_expiration and now < item_data["expiration"]: + continue + + remove_ids.add(item_id) + + for item_id in remove_ids: + try: + self.remove_id(item_id) + except Exception: + self.log.warning( + "Failed to remove temp publish item \"{}\"".format( + item_id + ), + exc_info=True + ) + + # Remove unknown folders/files + for filename in os.listdir(self._root_dir): + if filename in all_ids: + continue + + full_path = os.path.join(self._root_dir, filename) + if full_path in (self._metadata_path, self._lock_path): + continue + + try: + shutil.rmtree(full_path) + except Exception: + self.log.warning( + "Couldn't remove arbitrary path \"{}\"".format(full_path), + exc_info=True + ) + + def clear(self): + self.cleanup(False) + + def get_items(self): + """Receive all items from metadata file. + + Returns: + list: Info about each item in metadata. 
+ """ + output = [] + data = self._get_data() + for item_id, item_data in data.items(): + item_path = os.path.join(self._root_dir, item_id) + output.append(TempPublishFilesItem(item_id, item_data, item_path)) + return output + + def remove_id(self, item_id): + """Remove files of item and then remove the item from metadata.""" + filepath = os.path.join(self._root_dir, item_id) + if os.path.exists(filepath): + shutil.rmtree(filepath) + + with self._modify_data() as data: + data.pop(item_id, None) + + +def file_size_to_string(file_size): + size = 0 + size_ending_mapping = { + "KB": 1024 ** 1, + "MB": 1024 ** 2, + "GB": 1024 ** 3 + } + ending = "B" + for _ending, _size in size_ending_mapping.items(): + if file_size < _size: + break + size = file_size / _size + ending = _ending + return "{:.2f} {}".format(size, ending) diff --git a/openpype/tools/workfiles/model.py b/openpype/tools/workfiles/model.py index e9184842fc..8f9dd8c6ba 100644 --- a/openpype/tools/workfiles/model.py +++ b/openpype/tools/workfiles/model.py @@ -1,153 +1,179 @@ import os import logging -from Qt import QtCore +from Qt import QtCore, QtGui import qtawesome from openpype.style import ( get_default_entity_icon_color, get_disabled_entity_icon_color, ) - -from openpype.tools.utils.models import TreeModel, Item +from openpype.pipeline import get_representation_path log = logging.getLogger(__name__) -class FilesModel(TreeModel): - """Model listing files with specified extensions in a root folder""" - Columns = ["filename", "date"] +FILEPATH_ROLE = QtCore.Qt.UserRole + 2 +DATE_MODIFIED_ROLE = QtCore.Qt.UserRole + 3 +ITEM_ID_ROLE = QtCore.Qt.UserRole + 4 - FileNameRole = QtCore.Qt.UserRole + 2 - DateModifiedRole = QtCore.Qt.UserRole + 3 - FilePathRole = QtCore.Qt.UserRole + 4 - IsEnabled = QtCore.Qt.UserRole + 5 - def __init__(self, file_extensions, parent=None): - super(FilesModel, self).__init__(parent=parent) +class WorkAreaFilesModel(QtGui.QStandardItemModel): + """Model is looking into one folder for 
files with extension.""" + + def __init__(self, extensions, *args, **kwargs): + super(WorkAreaFilesModel, self).__init__(*args, **kwargs) + + self.setColumnCount(2) self._root = None - self._file_extensions = file_extensions - self._icons = { - "file": qtawesome.icon( - "fa.file-o", - color=get_default_entity_icon_color() + self._file_extensions = extensions + self._invalid_path_item = None + self._empty_root_item = None + self._file_icon = qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) + self._invalid_item_visible = False + self._items_by_filename = {} + + def _get_invalid_path_item(self): + if self._invalid_path_item is None: + message = "Work Area does not exist. Use Save As to create it." + item = QtGui.QStandardItem(message) + icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() ) - } + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + item.setColumnCount(self.columnCount()) + self._invalid_path_item = item + return self._invalid_path_item + + def _get_empty_root_item(self): + if self._empty_root_item is None: + message = "Work Area is empty." 
+ item = QtGui.QStandardItem(message) + icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() + ) + item.setData(icon, QtCore.Qt.DecorationRole) + item.setFlags(QtCore.Qt.NoItemFlags) + item.setColumnCount(self.columnCount()) + self._empty_root_item = item + return self._empty_root_item def set_root(self, root): + """Change directory where to look for file.""" self._root = root + if root and not os.path.exists(root): + log.debug("Work Area does not exist: {}".format(root)) self.refresh() - def _add_empty(self): - item = Item() - item.update({ - # Put a display message in 'filename' - "filename": "No files found.", - # Not-selectable - "enabled": False, - "date": None, - "filepath": None - }) - - self.add_child(item) + def _clear(self): + root_item = self.invisibleRootItem() + rows = root_item.rowCount() + if rows > 0: + if self._invalid_item_visible: + for row in range(rows): + root_item.takeRow(row) + else: + root_item.removeRows(0, rows) + self._items_by_filename = {} def refresh(self): - self.clear() - self.beginResetModel() - - root = self._root - - if not root: - self.endResetModel() - return - - if not os.path.exists(root): + """Refresh and update model items.""" + root_item = self.invisibleRootItem() + # If path is not set or does not exist then add invalid path item + if not self._root or not os.path.exists(self._root): + self._clear() # Add Work Area does not exist placeholder - log.debug("Work Area does not exist: %s", root) - message = "Work Area does not exist. Use Save As to create it." 
- item = Item({ - "filename": message, - "date": None, - "filepath": None, - "enabled": False, - "icon": qtawesome.icon( - "fa.times", - color=get_disabled_entity_icon_color() - ) - }) - self.add_child(item) - self.endResetModel() + item = self._get_invalid_path_item() + root_item.appendRow(item) + self._invalid_item_visible = True return - extensions = self._file_extensions + # Clear items if previous refresh set '_invalid_item_visible' to True + # - Invalid items are not stored to '_items_by_filename' so they would + # not be removed + if self._invalid_item_visible: + self._clear() - for filename in os.listdir(root): - path = os.path.join(root, filename) - if os.path.isdir(path): + # Check for new items that should be added and items that should be + # removed + new_items = [] + items_to_remove = set(self._items_by_filename.keys()) + for filename in os.listdir(self._root): + filepath = os.path.join(self._root, filename) + if os.path.isdir(filepath): continue ext = os.path.splitext(filename)[1] - if extensions and ext not in extensions: + if ext not in self._file_extensions: continue - modified = os.path.getmtime(path) + modified = os.path.getmtime(filepath) - item = Item({ - "filename": filename, - "date": modified, - "filepath": path - }) + # Use existing item or create new one + if filename in items_to_remove: + items_to_remove.remove(filename) + item = self._items_by_filename[filename] + else: + item = QtGui.QStandardItem(filename) + item.setColumnCount(self.columnCount()) + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + item.setData(self._file_icon, QtCore.Qt.DecorationRole) + new_items.append(item) + self._items_by_filename[filename] = item + # Update data that may be different + item.setData(filepath, FILEPATH_ROLE) + item.setData(modified, DATE_MODIFIED_ROLE) - self.add_child(item) + # Add new items if there are any + if new_items: + root_item.appendRows(new_items) - if self.rowCount() == 0: - self._add_empty() + # Remove items 
that are no longer available + for filename in items_to_remove: + item = self._items_by_filename.pop(filename) + root_item.removeRow(item.row()) - self.endResetModel() - - def has_filenames(self): - for item in self._root_item.children(): - if item.get("enabled", True): - return True - return False - - def rowCount(self, parent=None): - if parent is None or not parent.isValid(): - parent_item = self._root_item + # Add empty root item if there are not filenames that could be shown + if root_item.rowCount() > 0: + self._invalid_item_visible = False else: - parent_item = parent.internalPointer() - return parent_item.childCount() + self._invalid_item_visible = True + item = self._get_empty_root_item() + root_item.appendRow(item) - def data(self, index, role): - if not index.isValid(): - return + def has_valid_items(self): + """Directory has files that are listed in items.""" + return not self._invalid_item_visible - if role == QtCore.Qt.DecorationRole: - # Add icon to filename column - item = index.internalPointer() - if index.column() == 0: - if item["filepath"]: - return self._icons["file"] - return item.get("icon", None) + def flags(self, index): + # Use flags of first column for all columns + if index.column() != 0: + index = self.index(index.row(), 0, index.parent()) + return super(WorkAreaFilesModel, self).flags(index) - if role == self.FileNameRole: - item = index.internalPointer() - return item["filename"] + def data(self, index, role=None): + if role is None: + role = QtCore.Qt.DisplayRole - if role == self.DateModifiedRole: - item = index.internalPointer() - return item["date"] + # Handle roles for first column + if index.column() == 1: + if role == QtCore.Qt.DecorationRole: + return None - if role == self.FilePathRole: - item = index.internalPointer() - return item["filepath"] + if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): + role = DATE_MODIFIED_ROLE + index = self.index(index.row(), 0, index.parent()) - if role == self.IsEnabled: - item = 
index.internalPointer() - return item.get("enabled", True) - - return super(FilesModel, self).data(index, role) + return super(WorkAreaFilesModel, self).data(index, role) def headerData(self, section, orientation, role): # Show nice labels in the header @@ -160,4 +186,274 @@ class FilesModel(TreeModel): elif section == 1: return "Date modified" - return super(FilesModel, self).headerData(section, orientation, role) + return super(WorkAreaFilesModel, self).headerData( + section, orientation, role + ) + + +class PublishFilesModel(QtGui.QStandardItemModel): + """Model filling files with published files calculated from representation. + + This model looks for workfile family representations based on selected + asset and task. + + Asset must set to be able look for representations that could be used. + Task is used to filter representations by task. + Model has few filter criteria for filling. + - First criteria is that version document must have "workfile" in + "data.families". + - Second cirteria is that representation must have extension same as + defined extensions + - If task is set then representation must have 'task["name"]' with same + name. 
+ """ + + def __init__(self, extensions, dbcon, anatomy, *args, **kwargs): + super(PublishFilesModel, self).__init__(*args, **kwargs) + + self.setColumnCount(2) + + self._dbcon = dbcon + self._anatomy = anatomy + self._file_extensions = extensions + + self._invalid_context_item = None + self._empty_root_item = None + self._file_icon = qtawesome.icon( + "fa.file-o", + color=get_default_entity_icon_color() + ) + self._invalid_icon = qtawesome.icon( + "fa.times", + color=get_disabled_entity_icon_color() + ) + self._invalid_item_visible = False + + self._items_by_id = {} + + self._asset_id = None + self._task_name = None + + def _set_item_invalid(self, item): + item.setFlags(QtCore.Qt.NoItemFlags) + item.setData(self._invalid_icon, QtCore.Qt.DecorationRole) + + def _set_item_valid(self, item): + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + item.setData(self._file_icon, QtCore.Qt.DecorationRole) + + def _get_invalid_context_item(self): + if self._invalid_context_item is None: + item = QtGui.QStandardItem("Selected context is not valid.") + item.setColumnCount(self.columnCount()) + self._set_item_invalid(item) + self._invalid_context_item = item + return self._invalid_context_item + + def _get_empty_root_item(self): + if self._empty_root_item is None: + item = QtGui.QStandardItem("Didn't find any published workfiles.") + item.setColumnCount(self.columnCount()) + self._set_item_invalid(item) + self._empty_root_item = item + return self._empty_root_item + + def set_context(self, asset_id, task_name): + """Change context to asset and task. + + Args: + asset_id (ObjectId): Id of selected asset. + task_name (str): Name of selected task. 
+ """ + self._asset_id = asset_id + self._task_name = task_name + self.refresh() + + def _clear(self): + root_item = self.invisibleRootItem() + rows = root_item.rowCount() + if rows > 0: + if self._invalid_item_visible: + for row in range(rows): + root_item.takeRow(row) + else: + root_item.removeRows(0, rows) + self._items_by_id = {} + + def _get_workfie_representations(self): + output = [] + # Get subset docs of asset + subset_docs = self._dbcon.find( + { + "type": "subset", + "parent": self._asset_id + }, + { + "_id": True, + "name": True + } + ) + + subset_ids = [subset_doc["_id"] for subset_doc in subset_docs] + if not subset_ids: + return output + + # Get version docs of subsets with their families + version_docs = self._dbcon.find( + { + "type": "version", + "parent": {"$in": subset_ids} + }, + { + "_id": True, + "data.families": True, + "parent": True + } + ) + # Filter versions if they contain 'workfile' family + filtered_versions = [] + for version_doc in version_docs: + data = version_doc.get("data") or {} + families = data.get("families") or [] + if "workfile" in families: + filtered_versions.append(version_doc) + + version_ids = [version_doc["_id"] for version_doc in filtered_versions] + if not version_ids: + return output + + # Query representations of filtered versions and add filter for + # extension + extensions = [ext.replace(".", "") for ext in self._file_extensions] + repre_docs = self._dbcon.find( + { + "type": "representation", + "parent": {"$in": version_ids}, + "context.ext": {"$in": extensions} + } + ) + # Filter queried representations by task name if task is set + filtered_repre_docs = [] + for repre_doc in repre_docs: + if self._task_name is None: + filtered_repre_docs.append(repre_doc) + continue + + task_info = repre_doc["context"].get("task") + if not task_info: + print("Not task info") + continue + + if isinstance(task_info, dict): + task_name = task_info.get("name") + else: + task_name = task_info + + if task_name == self._task_name: 
+ filtered_repre_docs.append(repre_doc) + + # Collect paths of representations + for repre_doc in filtered_repre_docs: + path = get_representation_path( + repre_doc, root=self._anatomy.roots + ) + output.append((path, repre_doc["_id"])) + return output + + def refresh(self): + root_item = self.invisibleRootItem() + if not self._asset_id: + self._clear() + # Add Work Area does not exist placeholder + item = self._get_invalid_context_item() + root_item.appendRow(item) + self._invalid_item_visible = True + return + + if self._invalid_item_visible: + self._clear() + + new_items = [] + items_to_remove = set(self._items_by_id.keys()) + for item in self._get_workfie_representations(): + filepath, repre_id = item + # TODO handle empty filepaths + if not filepath: + continue + filename = os.path.basename(filepath) + + if repre_id in items_to_remove: + items_to_remove.remove(repre_id) + item = self._items_by_id[repre_id] + else: + item = QtGui.QStandardItem(filename) + item.setColumnCount(self.columnCount()) + new_items.append(item) + self._items_by_id[repre_id] = item + + if os.path.exists(filepath): + modified = os.path.getmtime(filepath) + tooltip = None + self._set_item_valid(item) + else: + modified = None + tooltip = "File is not available from this machine" + self._set_item_invalid(item) + + item.setData(tooltip, QtCore.Qt.ToolTipRole) + item.setData(filepath, FILEPATH_ROLE) + item.setData(modified, DATE_MODIFIED_ROLE) + item.setData(repre_id, ITEM_ID_ROLE) + + if new_items: + root_item.appendRows(new_items) + + for filename in items_to_remove: + item = self._items_by_id.pop(filename) + root_item.removeRow(item.row()) + + if root_item.rowCount() > 0: + self._invalid_item_visible = False + else: + self._invalid_item_visible = True + item = self._get_empty_root_item() + root_item.appendRow(item) + + def has_valid_items(self): + return not self._invalid_item_visible + + def flags(self, index): + if index.column() != 0: + index = self.index(index.row(), 0, index.parent()) 
+ return super(PublishFilesModel, self).flags(index) + + def data(self, index, role=None): + if role is None: + role = QtCore.Qt.DisplayRole + + if index.column() == 1: + if role == QtCore.Qt.DecorationRole: + return None + + if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): + role = DATE_MODIFIED_ROLE + index = self.index(index.row(), 0, index.parent()) + + return super(PublishFilesModel, self).data(index, role) + + def headerData(self, section, orientation, role): + # Show nice labels in the header + if ( + role == QtCore.Qt.DisplayRole + and orientation == QtCore.Qt.Horizontal + ): + if section == 0: + return "Name" + elif section == 1: + return "Date modified" + + return super(PublishFilesModel, self).headerData( + section, orientation, role + ) diff --git a/openpype/tools/workfiles/save_as_dialog.py b/openpype/tools/workfiles/save_as_dialog.py new file mode 100644 index 0000000000..e616a325cc --- /dev/null +++ b/openpype/tools/workfiles/save_as_dialog.py @@ -0,0 +1,482 @@ +import os +import re +import copy +import logging + +from Qt import QtWidgets, QtCore + +from avalon import api, io + +from openpype.lib import ( + get_last_workfile_with_version, + get_workdir_data, +) +from openpype.tools.utils import PlaceholderLineEdit + +log = logging.getLogger(__name__) + + +def build_workfile_data(session): + """Get the data required for workfile formatting from avalon `session`""" + + # Set work file data for template formatting + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + project_doc = io.find_one( + {"type": "project"}, + { + "name": True, + "data.code": True, + "config.tasks": True, + } + ) + + asset_doc = io.find_one( + { + "type": "asset", + "name": asset_name + }, + { + "name": True, + "data.tasks": True, + "data.parents": True + } + ) + data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data.update({ + "version": 1, + "comment": "", + "ext": None + }) + + return 
data + + +class CommentMatcher(object): + """Use anatomy and work file data to parse comments from filenames""" + def __init__(self, anatomy, template_key, data): + + self.fname_regex = None + + template = anatomy.templates[template_key]["file"] + if "{comment}" not in template: + # Don't look for comment if template doesn't allow it + return + + # Create a regex group for extensions + extensions = api.registered_host().file_extensions() + any_extension = "(?:{})".format( + "|".join(re.escape(ext[1:]) for ext in extensions) + ) + + # Use placeholders that will never be in the filename + temp_data = copy.deepcopy(data) + temp_data["comment"] = "<>" + temp_data["version"] = "<>" + temp_data["ext"] = "<>" + + formatted = anatomy.format(temp_data) + fname_pattern = formatted[template_key]["file"] + fname_pattern = re.escape(fname_pattern) + + # Replace comment and version with something we can match with regex + replacements = { + "<>": "(.+)", + "<>": "[0-9]+", + "<>": any_extension, + } + for src, dest in replacements.items(): + fname_pattern = fname_pattern.replace(re.escape(src), dest) + + # Match from beginning to end of string to be safe + fname_pattern = "^{}$".format(fname_pattern) + + self.fname_regex = re.compile(fname_pattern) + + def parse_comment(self, filepath): + """Parse the {comment} part from a filename""" + if not self.fname_regex: + return + + fname = os.path.basename(filepath) + match = self.fname_regex.match(fname) + if match: + return match.group(1) + + +class SubversionLineEdit(QtWidgets.QWidget): + """QLineEdit with QPushButton for drop down selection of list of strings""" + + text_changed = QtCore.Signal(str) + + def __init__(self, *args, **kwargs): + super(SubversionLineEdit, self).__init__(*args, **kwargs) + + input_field = PlaceholderLineEdit(self) + menu_btn = QtWidgets.QPushButton(self) + menu_btn.setFixedWidth(18) + + menu = QtWidgets.QMenu(self) + menu_btn.setMenu(menu) + + layout = QtWidgets.QHBoxLayout(self) + 
layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(3) + + layout.addWidget(input_field, 1) + layout.addWidget(menu_btn, 0) + + input_field.textChanged.connect(self.text_changed) + + self.setFocusProxy(input_field) + + self._input_field = input_field + self._menu_btn = menu_btn + self._menu = menu + + def set_placeholder(self, placeholder): + self._input_field.setPlaceholderText(placeholder) + + def set_text(self, text): + self._input_field.setText(text) + + def set_values(self, values): + self._update(values) + + def _on_button_clicked(self): + self._menu.exec_() + + def _on_action_clicked(self, action): + self._input_field.setText(action.text()) + + def _update(self, values): + """Create optional predefined subset names + + Args: + default_names(list): all predefined names + + Returns: + None + """ + + menu = self._menu + button = self._menu_btn + + state = any(values) + button.setEnabled(state) + if state is False: + return + + # Include an empty string + values = [""] + sorted(values) + + # Get and destroy the action group + group = button.findChild(QtWidgets.QActionGroup) + if group: + group.deleteLater() + + # Build new action group + group = QtWidgets.QActionGroup(button) + for name in values: + action = group.addAction(name) + menu.addAction(action) + + group.triggered.connect(self._on_action_clicked) + + +class SaveAsDialog(QtWidgets.QDialog): + """Name Window to define a unique filename inside a root folder + + The filename will be based on the "workfile" template defined in the + project["config"]["template"]. 
+ + """ + + def __init__(self, parent, root, anatomy, template_key, session=None): + super(SaveAsDialog, self).__init__(parent=parent) + self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) + + self.result = None + self.host = api.registered_host() + self.root = root + self.work_file = None + + if not session: + # Fallback to active session + session = api.Session + + self.data = build_workfile_data(session) + + # Store project anatomy + self.anatomy = anatomy + self.template = anatomy.templates[template_key]["file"] + self.template_key = template_key + + # Btns widget + btns_widget = QtWidgets.QWidget(self) + + btn_ok = QtWidgets.QPushButton("Ok", btns_widget) + btn_cancel = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.addWidget(btn_ok) + btns_layout.addWidget(btn_cancel) + + # Inputs widget + inputs_widget = QtWidgets.QWidget(self) + + # Version widget + version_widget = QtWidgets.QWidget(inputs_widget) + + # Version number input + version_input = QtWidgets.QSpinBox(version_widget) + version_input.setMinimum(1) + version_input.setMaximum(9999) + + # Last version checkbox + last_version_check = QtWidgets.QCheckBox( + "Next Available Version", version_widget + ) + last_version_check.setChecked(True) + + version_layout = QtWidgets.QHBoxLayout(version_widget) + version_layout.setContentsMargins(0, 0, 0, 0) + version_layout.addWidget(version_input) + version_layout.addWidget(last_version_check) + + # Preview widget + preview_label = QtWidgets.QLabel("Preview filename", inputs_widget) + + # Subversion input + subversion = SubversionLineEdit(inputs_widget) + subversion.set_placeholder("Will be part of filename.") + + # Extensions combobox + ext_combo = QtWidgets.QComboBox(inputs_widget) + # Add styled delegate to use stylesheets + ext_delegate = QtWidgets.QStyledItemDelegate() + ext_combo.setItemDelegate(ext_delegate) + ext_combo.addItems(self.host.file_extensions()) + + # Build inputs 
+ inputs_layout = QtWidgets.QFormLayout(inputs_widget) + # Add version only if template contains version key + # - since the version can be padded with "{version:0>4}" we only search + # for "{version". + if "{version" in self.template: + inputs_layout.addRow("Version:", version_widget) + else: + version_widget.setVisible(False) + + # Add subversion only if template contains `{comment}` + if "{comment}" in self.template: + inputs_layout.addRow("Subversion:", subversion) + + # Detect whether a {comment} is in the current filename - if so, + # preserve it by default and set it in the comment/subversion field + current_filepath = self.host.current_file() + if current_filepath: + # We match the current filename against the current session + # instead of the session where the user is saving to. + current_data = build_workfile_data(api.Session) + matcher = CommentMatcher(anatomy, template_key, current_data) + comment = matcher.parse_comment(current_filepath) + if comment: + log.info("Detected subversion comment: {}".format(comment)) + self.data["comment"] = comment + subversion.set_text(comment) + + existing_comments = self.get_existing_comments() + subversion.set_values(existing_comments) + + else: + subversion.setVisible(False) + inputs_layout.addRow("Extension:", ext_combo) + inputs_layout.addRow("Preview:", preview_label) + + # Build layout + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(inputs_widget) + main_layout.addWidget(btns_widget) + + # Signal callback registration + version_input.valueChanged.connect(self.on_version_spinbox_changed) + last_version_check.stateChanged.connect( + self.on_version_checkbox_changed + ) + + subversion.text_changed.connect(self.on_comment_changed) + ext_combo.currentIndexChanged.connect(self.on_extension_changed) + + btn_ok.pressed.connect(self.on_ok_pressed) + btn_cancel.pressed.connect(self.on_cancel_pressed) + + # Allow "Enter" key to accept the save. 
+ btn_ok.setDefault(True) + + # Force default focus to comment, some hosts didn't automatically + # apply focus to this line edit (e.g. Houdini) + subversion.setFocus() + + # Store widgets + self.btn_ok = btn_ok + + self.version_widget = version_widget + + self.version_input = version_input + self.last_version_check = last_version_check + + self.preview_label = preview_label + self.subversion = subversion + self.ext_combo = ext_combo + self._ext_delegate = ext_delegate + + self.refresh() + + def get_existing_comments(self): + matcher = CommentMatcher(self.anatomy, self.template_key, self.data) + host_extensions = set(self.host.file_extensions()) + comments = set() + if os.path.isdir(self.root): + for fname in os.listdir(self.root): + if not os.path.isfile(os.path.join(self.root, fname)): + continue + + ext = os.path.splitext(fname)[-1] + if ext not in host_extensions: + continue + + comment = matcher.parse_comment(fname) + if comment: + comments.add(comment) + + return list(comments) + + def on_version_spinbox_changed(self, value): + self.data["version"] = value + self.refresh() + + def on_version_checkbox_changed(self, _value): + self.refresh() + + def on_comment_changed(self, text): + self.data["comment"] = text + self.refresh() + + def on_extension_changed(self): + ext = self.ext_combo.currentText() + if ext == self.data["ext"]: + return + self.data["ext"] = ext + self.refresh() + + def on_ok_pressed(self): + self.result = self.work_file + self.close() + + def on_cancel_pressed(self): + self.close() + + def get_result(self): + return self.result + + def get_work_file(self): + data = copy.deepcopy(self.data) + if not data["comment"]: + data.pop("comment", None) + + data["ext"] = data["ext"][1:] + + anatomy_filled = self.anatomy.format(data) + return anatomy_filled[self.template_key]["file"] + + def refresh(self): + extensions = self.host.file_extensions() + extension = self.data["ext"] + if extension is None: + # Define saving file extension + current_file = 
self.host.current_file() + if current_file: + # Match the extension of current file + _, extension = os.path.splitext(current_file) + else: + extension = extensions[0] + + if extension != self.data["ext"]: + self.data["ext"] = extension + index = self.ext_combo.findText( + extension, QtCore.Qt.MatchFixedString + ) + if index >= 0: + self.ext_combo.setCurrentIndex(index) + + if not self.last_version_check.isChecked(): + self.version_input.setEnabled(True) + self.data["version"] = self.version_input.value() + + work_file = self.get_work_file() + + else: + self.version_input.setEnabled(False) + + data = copy.deepcopy(self.data) + template = str(self.template) + + if not data["comment"]: + data.pop("comment", None) + + data["ext"] = data["ext"][1:] + + version = get_last_workfile_with_version( + self.root, template, data, extensions + )[1] + + if version is None: + version = 1 + else: + version += 1 + + found_valid_version = False + # Check if next version is valid version and give a chance to try + # next 100 versions + for idx in range(100): + # Store version to data + self.data["version"] = version + + work_file = self.get_work_file() + # Safety check + path = os.path.join(self.root, work_file) + if not os.path.exists(path): + found_valid_version = True + break + + # Try next version + version += 1 + # Log warning + if idx == 0: + log.warning(( + "BUG: Function `get_last_workfile_with_version` " + "didn't return last version." + )) + # Raise exception if even 100 version fallback didn't help + if not found_valid_version: + raise AssertionError( + "This is a bug. Couldn't find valid version!" + ) + + self.work_file = work_file + + path_exists = os.path.exists(os.path.join(self.root, work_file)) + + self.btn_ok.setEnabled(not path_exists) + + if path_exists: + self.preview_label.setText( + "Cannot create \"{0}\" because file exists!" 
+ "".format(work_file) + ) + else: + self.preview_label.setText( + "{0}".format(work_file) + ) diff --git a/openpype/tools/workfiles/view.py b/openpype/tools/workfiles/view.py deleted file mode 100644 index 8e3993e4c7..0000000000 --- a/openpype/tools/workfiles/view.py +++ /dev/null @@ -1,15 +0,0 @@ -from Qt import QtWidgets, QtCore - - -class FilesView(QtWidgets.QTreeView): - doubleClickedLeft = QtCore.Signal() - doubleClickedRight = QtCore.Signal() - - def mouseDoubleClickEvent(self, event): - if event.button() == QtCore.Qt.LeftButton: - self.doubleClickedLeft.emit() - - elif event.button() == QtCore.Qt.RightButton: - self.doubleClickedRight.emit() - - return super(FilesView, self).mouseDoubleClickEvent(event) diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py new file mode 100644 index 0000000000..8654a18036 --- /dev/null +++ b/openpype/tools/workfiles/window.py @@ -0,0 +1,393 @@ +import os +import datetime +from Qt import QtCore, QtWidgets + +from avalon import io + +from openpype import style +from openpype.lib import ( + get_workfile_doc, + create_workfile_doc, + save_workfile_data_to_doc, +) +from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget +from openpype.tools.utils.tasks_widget import TasksWidget + +from .files_widget import FilesWidget +from .lib import TempPublishFiles, file_size_to_string + + +class SidePanelWidget(QtWidgets.QWidget): + save_clicked = QtCore.Signal() + published_workfile_message = ( + "INFO: Opened published workfiles will be stored in" + " temp directory on your machine. Current temp size: {}." 
+ ) + + def __init__(self, parent=None): + super(SidePanelWidget, self).__init__(parent) + + details_label = QtWidgets.QLabel("Details", self) + details_input = QtWidgets.QPlainTextEdit(self) + details_input.setReadOnly(True) + + artist_note_widget = QtWidgets.QWidget(self) + note_label = QtWidgets.QLabel("Artist note", artist_note_widget) + note_input = QtWidgets.QPlainTextEdit(artist_note_widget) + btn_note_save = QtWidgets.QPushButton("Save note", artist_note_widget) + + artist_note_layout = QtWidgets.QVBoxLayout(artist_note_widget) + artist_note_layout.setContentsMargins(0, 0, 0, 0) + artist_note_layout.addWidget(note_label, 0) + artist_note_layout.addWidget(note_input, 1) + artist_note_layout.addWidget( + btn_note_save, 0, alignment=QtCore.Qt.AlignRight + ) + + publish_temp_widget = QtWidgets.QWidget(self) + publish_temp_info_label = QtWidgets.QLabel( + self.published_workfile_message.format( + file_size_to_string(0) + ), + publish_temp_widget + ) + publish_temp_info_label.setWordWrap(True) + + btn_clear_temp = QtWidgets.QPushButton( + "Clear temp", publish_temp_widget + ) + + publish_temp_layout = QtWidgets.QVBoxLayout(publish_temp_widget) + publish_temp_layout.setContentsMargins(0, 0, 0, 0) + publish_temp_layout.addWidget(publish_temp_info_label, 0) + publish_temp_layout.addWidget( + btn_clear_temp, 0, alignment=QtCore.Qt.AlignRight + ) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(details_label, 0) + main_layout.addWidget(details_input, 1) + main_layout.addWidget(artist_note_widget, 1) + main_layout.addWidget(publish_temp_widget, 0) + + note_input.textChanged.connect(self._on_note_change) + btn_note_save.clicked.connect(self._on_save_click) + btn_clear_temp.clicked.connect(self._on_clear_temp_click) + + self._details_input = details_input + self._artist_note_widget = artist_note_widget + self._note_input = note_input + self._btn_note_save = btn_note_save + + self._publish_temp_info_label = 
publish_temp_info_label + self._publish_temp_widget = publish_temp_widget + + self._orig_note = "" + self._workfile_doc = None + + publish_temp_widget.setVisible(False) + + def set_published_visible(self, published_visible): + self._artist_note_widget.setVisible(not published_visible) + self._publish_temp_widget.setVisible(published_visible) + if published_visible: + self.refresh_publish_temp_sizes() + + def refresh_publish_temp_sizes(self): + temp_publish_files = TempPublishFiles() + text = self.published_workfile_message.format( + file_size_to_string(temp_publish_files.size) + ) + self._publish_temp_info_label.setText(text) + + def _on_clear_temp_click(self): + temp_publish_files = TempPublishFiles() + temp_publish_files.clear() + self.refresh_publish_temp_sizes() + + def _on_note_change(self): + text = self._note_input.toPlainText() + self._btn_note_save.setEnabled(self._orig_note != text) + + def _on_save_click(self): + self._orig_note = self._note_input.toPlainText() + self._on_note_change() + self.save_clicked.emit() + + def set_context(self, asset_id, task_name, filepath, workfile_doc): + # Check if asset, task and file are selected + # NOTE workfile document is not requirement + enabled = bool(asset_id) and bool(task_name) and bool(filepath) + + self._details_input.setEnabled(enabled) + self._note_input.setEnabled(enabled) + self._btn_note_save.setEnabled(enabled) + + # Make sure workfile doc is overridden + self._workfile_doc = workfile_doc + # Disable inputs and remove texts if any required arguments are missing + if not enabled: + self._orig_note = "" + self._details_input.setPlainText("") + self._note_input.setPlainText("") + return + + orig_note = "" + if workfile_doc: + orig_note = workfile_doc["data"].get("note") or orig_note + + self._orig_note = orig_note + self._note_input.setPlainText(orig_note) + # Set as empty string + self._details_input.setPlainText("") + + filestat = os.stat(filepath) + size_value = file_size_to_string(filestat.st_size) + + 
# Append html string + datetime_format = "%b %d %Y %H:%M:%S" + creation_time = datetime.datetime.fromtimestamp(filestat.st_ctime) + modification_time = datetime.datetime.fromtimestamp(filestat.st_mtime) + lines = ( + "Size:", + size_value, + "Created:", + creation_time.strftime(datetime_format), + "Modified:", + modification_time.strftime(datetime_format) + ) + self._details_input.appendHtml("
".join(lines)) + + def get_workfile_data(self): + data = { + "note": self._note_input.toPlainText() + } + return self._workfile_doc, data + + +class Window(QtWidgets.QMainWindow): + """Work Files Window""" + title = "Work Files" + + def __init__(self, parent=None): + super(Window, self).__init__(parent=parent) + self.setWindowTitle(self.title) + window_flags = QtCore.Qt.Window | QtCore.Qt.WindowCloseButtonHint + if not parent: + window_flags |= QtCore.Qt.WindowStaysOnTopHint + self.setWindowFlags(window_flags) + + # Create pages widget and set it as central widget + pages_widget = QtWidgets.QStackedWidget(self) + self.setCentralWidget(pages_widget) + + home_page_widget = QtWidgets.QWidget(pages_widget) + home_body_widget = QtWidgets.QWidget(home_page_widget) + + assets_widget = SingleSelectAssetsWidget(io, parent=home_body_widget) + assets_widget.set_current_asset_btn_visibility(True) + + tasks_widget = TasksWidget(io, home_body_widget) + files_widget = FilesWidget(home_body_widget) + side_panel = SidePanelWidget(home_body_widget) + + pages_widget.addWidget(home_page_widget) + + # Build home + home_page_layout = QtWidgets.QVBoxLayout(home_page_widget) + home_page_layout.addWidget(home_body_widget) + + # Build home - body + body_layout = QtWidgets.QVBoxLayout(home_body_widget) + split_widget = QtWidgets.QSplitter(home_body_widget) + split_widget.addWidget(assets_widget) + split_widget.addWidget(tasks_widget) + split_widget.addWidget(files_widget) + split_widget.addWidget(side_panel) + split_widget.setSizes([255, 160, 455, 175]) + + body_layout.addWidget(split_widget) + + # Add top margin for tasks to align it visually with files as + # the files widget has a filter field which tasks does not. 
+ tasks_widget.setContentsMargins(0, 32, 0, 0) + + # Set context after asset widget is refreshed + # - to do so it is necessary to wait until refresh is done + set_context_timer = QtCore.QTimer() + set_context_timer.setInterval(100) + + # Connect signals + set_context_timer.timeout.connect(self._on_context_set_timeout) + assets_widget.selection_changed.connect(self._on_asset_changed) + tasks_widget.task_changed.connect(self._on_task_changed) + files_widget.file_selected.connect(self.on_file_select) + files_widget.workfile_created.connect(self.on_workfile_create) + files_widget.file_opened.connect(self._on_file_opened) + files_widget.publish_file_viewed.connect( + self._on_publish_file_viewed + ) + files_widget.published_visible_changed.connect( + self._on_published_change + ) + side_panel.save_clicked.connect(self.on_side_panel_save) + + self._set_context_timer = set_context_timer + self.home_page_widget = home_page_widget + self.pages_widget = pages_widget + self.home_body_widget = home_body_widget + self.split_widget = split_widget + + self.assets_widget = assets_widget + self.tasks_widget = tasks_widget + self.files_widget = files_widget + self.side_panel = side_panel + + # Force focus on the open button by default, required for Houdini. + files_widget.setFocus() + + self.resize(1200, 600) + + self._first_show = True + self._context_to_set = None + + def showEvent(self, event): + super(Window, self).showEvent(event) + if self._first_show: + self._first_show = False + self.refresh() + self.setStyleSheet(style.load_stylesheet()) + + def keyPressEvent(self, event): + """Custom keyPressEvent. + + Override keyPressEvent to do nothing so that Maya's panels won't + take focus when pressing "SHIFT" whilst mouse is over viewport or + outliner. This way users don't accidentally perform Maya commands + whilst trying to name an instance. 
+ + """ + + def set_save_enabled(self, enabled): + self.files_widget.set_save_enabled(enabled) + + def on_file_select(self, filepath): + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + + workfile_doc = None + if asset_id and task_name and filepath: + filename = os.path.split(filepath)[1] + workfile_doc = get_workfile_doc( + asset_id, task_name, filename, io + ) + self.side_panel.set_context( + asset_id, task_name, filepath, workfile_doc + ) + + def on_workfile_create(self, filepath): + self._create_workfile_doc(filepath) + + def _on_file_opened(self): + self.close() + + def _on_publish_file_viewed(self): + self.side_panel.refresh_publish_temp_sizes() + + def _on_published_change(self, visible): + self.side_panel.set_published_visible(visible) + + def on_side_panel_save(self): + workfile_doc, data = self.side_panel.get_workfile_data() + if not workfile_doc: + filepath = self.files_widget._get_selected_filepath() + self._create_workfile_doc(filepath, force=True) + workfile_doc = self._get_current_workfile_doc() + + save_workfile_data_to_doc(workfile_doc, data, io) + + def _get_current_workfile_doc(self, filepath=None): + if filepath is None: + filepath = self.files_widget._get_selected_filepath() + task_name = self.tasks_widget.get_selected_task_name() + asset_id = self.assets_widget.get_selected_asset_id() + if not task_name or not asset_id or not filepath: + return + + filename = os.path.split(filepath)[1] + return get_workfile_doc( + asset_id, task_name, filename, io + ) + + def _create_workfile_doc(self, filepath, force=False): + workfile_doc = None + if not force: + workfile_doc = self._get_current_workfile_doc(filepath) + + if not workfile_doc: + workdir, filename = os.path.split(filepath) + asset_id = self.assets_widget.get_selected_asset_id() + asset_doc = io.find_one({"_id": asset_id}) + task_name = self.tasks_widget.get_selected_task_name() + create_workfile_doc(asset_doc, task_name, filename, 
workdir, io) + + def refresh(self): + # Refresh asset widget + self.assets_widget.refresh() + + self._on_task_changed() + + def set_context(self, context): + self._context_to_set = context + self._set_context_timer.start() + + def _on_context_set_timeout(self): + if self._context_to_set is None: + self._set_context_timer.stop() + return + + if self.assets_widget.refreshing: + return + + self._context_to_set, context = None, self._context_to_set + if "asset" in context: + asset_doc = io.find_one( + { + "name": context["asset"], + "type": "asset" + }, + {"_id": 1} + ) or {} + asset_id = asset_doc.get("_id") + # Select the asset + self.assets_widget.select_asset(asset_id) + self.tasks_widget.set_asset_id(asset_id) + + if "task" in context: + self.tasks_widget.select_task_name(context["task"]) + self._on_task_changed() + + def _on_asset_changed(self): + asset_id = self.assets_widget.get_selected_asset_id() + if asset_id: + self.tasks_widget.setEnabled(True) + else: + # Force disable the other widgets if no + # active selection + self.tasks_widget.setEnabled(False) + self.files_widget.setEnabled(False) + + self.tasks_widget.set_asset_id(asset_id) + + def _on_task_changed(self): + asset_id = self.assets_widget.get_selected_asset_id() + task_name = self.tasks_widget.get_selected_task_name() + task_type = self.tasks_widget.get_selected_task_type() + + asset_is_valid = asset_id is not None + self.tasks_widget.setEnabled(asset_is_valid) + + self.files_widget.setEnabled(bool(task_name) and asset_is_valid) + self.files_widget.set_asset_task(asset_id, task_name, task_type) + self.files_widget.refresh() diff --git a/openpype/version.py b/openpype/version.py index d2182ac7da..2390309e76 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.9.0" +__version__ = "3.9.2-nightly.1" diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/widgets/attribute_defs/widgets.py 
index a6f1b8d6c9..23f025967d 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/widgets/attribute_defs/widgets.py @@ -2,7 +2,7 @@ import uuid from Qt import QtWidgets, QtCore -from openpype.pipeline.lib import ( +from openpype.lib.attribute_definitions import ( AbtractAttrDef, UnknownDef, NumberDef, diff --git a/pyproject.toml b/pyproject.toml index 681702560a..90e264d456 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.9.0" # OpenPype +version = "3.9.2-nightly.1" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" diff --git a/repos/avalon-core b/repos/avalon-core index 7753d15507..2fa14cea6f 160000 --- a/repos/avalon-core +++ b/repos/avalon-core @@ -1 +1 @@ -Subproject commit 7753d15507afadc143b7d49db8fcfaa6a29fed91 +Subproject commit 2fa14cea6f6a9d86eec70bbb96860cbe4c75c8eb diff --git a/tests/README.md b/tests/README.md index bb1cdbdef8..69828cdbc2 100644 --- a/tests/README.md +++ b/tests/README.md @@ -21,3 +21,27 @@ Specific location could be provided to this command as an argument, either as ab (eg. `python ${OPENPYPE_ROOT}/start.py start.py runtests ../tests/integration`) will trigger only tests in `integration` folder. See `${OPENPYPE_ROOT}/cli.py:runtests` for other arguments. + +Run in IDE: +----------- +If you prefer to run/debug single file directly in IDE of your choice, you might encounter issues with imports. +It would manifest like `KeyError: 'OPENPYPE_DATABASE_NAME'`. That means you are importing module that depends on OP to be running, eg. all expected variables are set. + +In some cases your tests might be so localized, that you don't care about all env vars to be set properly. 
+In that case you might add this dummy configuration BEFORE any imports in your test file +``` +import os +os.environ["AVALON_MONGO"] = "mongodb://localhost:27017" +os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017" +os.environ["AVALON_DB"] = "avalon" +os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" +os.environ["AVALON_TIMEOUT"] = '3000' +os.environ["OPENPYPE_DEBUG"] = "3" +os.environ["AVALON_CONFIG"] = "pype" +os.environ["AVALON_ASSET"] = "Asset" +os.environ["AVALON_PROJECT"] = "test_project" +``` +(AVALON_ASSET and AVALON_PROJECT values should exist in your environment) + +This might be enough to run your test file separately. Do not commit this skeleton though. +Use only when you know what you are doing! \ No newline at end of file diff --git a/tests/unit/openpype/lib/test_delivery.py b/tests/unit/openpype/lib/test_delivery.py new file mode 100644 index 0000000000..04a71655e3 --- /dev/null +++ b/tests/unit/openpype/lib/test_delivery.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +"""Test suite for delivery functions.""" +from openpype.lib.delivery import collect_frames + + +def test_collect_frames_multi_sequence(): + files = ["Asset_renderCompositingMain_v001.0000.png", + "Asset_renderCompositingMain_v001.0001.png", + "Asset_renderCompositingMain_v001.0002.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.0000.png": "0000", + "Asset_renderCompositingMain_v001.0001.png": "0001", + "Asset_renderCompositingMain_v001.0002.png": "0002" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_multi_sequence_different_format(): + files = ["Asset.v001.renderCompositingMain.0000.png", + "Asset.v001.renderCompositingMain.0001.png", + "Asset.v001.renderCompositingMain.0002.png"] + ret = collect_frames(files) + + expected = { + "Asset.v001.renderCompositingMain.0000.png": "0000", + "Asset.v001.renderCompositingMain.0001.png": "0001", + "Asset.v001.renderCompositingMain.0002.png": "0002" + } + + 
print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence(): + files = ["Asset_renderCompositingMain_v001.0000.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.0000.png": "0000" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_negative(): + files = ["Asset_renderCompositingMain_v001.-0000.png"] + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.-0000.png": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_shot(): + files = ["testing_sh010_workfileCompositing_v001.aep"] + ret = collect_frames(files) + + expected = { + "testing_sh010_workfileCompositing_v001.aep": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_numbers(): + files = ["PRJ_204_430_0005_renderLayoutMain_v001.0001.exr"] + ret = collect_frames(files) + + expected = { + "PRJ_204_430_0005_renderLayoutMain_v001.0001.exr": "0001" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_shot_with_frame(): + files = ["testing_sh010_workfileCompositing_000_v001.aep"] + ret = collect_frames(files) + + expected = { + "testing_sh010_workfileCompositing_000_v001.aep": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_full_path(): + files = ['C:/test_project/assets/locations/Town/work/compositing\\renders\\aftereffects\\test_project_TestAsset_compositing_v001\\TestAsset_renderCompositingMain_v001.mov'] # noqa: E501 + ret = collect_frames(files) + + expected = { + 'C:/test_project/assets/locations/Town/work/compositing\\renders\\aftereffects\\test_project_TestAsset_compositing_v001\\TestAsset_renderCompositingMain_v001.mov': None # noqa: E501 + } + + print(ret) + assert ret == expected, "Not matching" + + +def 
test_collect_frames_single_sequence_different_format(): + files = ["Asset.v001.renderCompositingMain_0000.png"] + ret = collect_frames(files) + + expected = { + "Asset.v001.renderCompositingMain_0000.png": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_withhout_version(): + files = ["pngv001.renderCompositingMain_0000.png"] + ret = collect_frames(files) + + expected = { + "pngv001.renderCompositingMain_0000.png": None + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_sequence_as_dict(): + files = {"Asset_renderCompositingMain_v001.0000.png"} + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.0000.png": "0000" + } + + print(ret) + assert ret == expected, "Not matching" + + +def test_collect_frames_single_file(): + files = {"Asset_renderCompositingMain_v001.png"} + ret = collect_frames(files) + + expected = { + "Asset_renderCompositingMain_v001.png": None + } + + print(ret) + assert ret == expected, "Not matching" + diff --git a/tools/ci_tools.py b/tools/ci_tools.py index aeb367af38..4c59cd6af6 100644 --- a/tools/ci_tools.py +++ b/tools/ci_tools.py @@ -8,8 +8,12 @@ import os def get_release_type_github(Log, github_token): # print(Log) - minor_labels = ["type: feature", "type: deprecated"] - patch_labels = ["type: enhancement", "type: bug"] + minor_labels = ["Bump Minor"] + # patch_labels = [ + # "type: enhancement", + # "type: bug", + # "type: deprecated", + # "type: Feature"] g = Github(github_token) repo = g.get_repo("pypeclub/OpenPype") @@ -28,9 +32,12 @@ def get_release_type_github(Log, github_token): if any(label in labels for label in minor_labels): return "minor" - - if any(label in labels for label in patch_labels): + else: return "patch" + + # TODO: if all is working fine, this part can be cleaned up eventually + # if any(label in labels for label in patch_labels): + # return "patch" return None diff --git 
a/website/src/components/BadgesSection/badges.js b/website/src/components/BadgesSection/badges.js index 4bc85df2ef..5b179d066d 100644 --- a/website/src/components/BadgesSection/badges.js +++ b/website/src/components/BadgesSection/badges.js @@ -1,58 +1,63 @@ export default { upper: [ - { - title: "License", - src: - "https://img.shields.io/github/license/pypeclub/pype?labelColor=303846", - href: "https://github.com/pypeclub/pype", - }, - { - title: "Release", - src: - "https://img.shields.io/github/v/release/pypeclub/pype?labelColor=303846", - href: "https://github.com/pypeclub/pype", - }, - { - title: "Requirements State", - src: - "https://img.shields.io/requires/github/pypeclub/pype?labelColor=303846", - href: - "https://requires.io/github/pypeclub/pype/requirements/?branch=main", - }, { title: "VFX Platform", src: "https://img.shields.io/badge/vfx%20platform-2021-lightgrey?labelColor=303846", href: "https://vfxplatform.com", }, + { + title: "License", + src: + "https://img.shields.io/github/license/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", + }, + { + title: "Release", + src: + "https://img.shields.io/github/v/release/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", + }, { title: "GitHub last commit", src: - "https://img.shields.io/github/last-commit/pypeclub/pype/develop?labelColor=303846", - href: "https://github.com/pypeclub/pype", + "https://img.shields.io/github/last-commit/pypeclub/openpype/develop?labelColor=303846", + href: "https://github.com/pypeclub/openpype", }, { title: "GitHub commit activity", src: - "https://img.shields.io/github/commit-activity/y/pypeclub/pype?labelColor=303846", - href: "https://github.com/pypeclub/pype", + "https://img.shields.io/github/commit-activity/y/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", }, { title: "Repository Size", src: - 
"https://img.shields.io/github/repo-size/pypeclub/pype?labelColor=303846", - href: "https://github.com/pypeclub/pype", + "https://img.shields.io/github/repo-size/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", + }, + { + title: "Repository Size", + src: + "https://img.shields.io/github/contributors/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", + }, + { + title: "Stars", + src: + "https://img.shields.io/github/stars/pypeclub?labelColor=303846", + href: "https://github.com/pypeclub/openpype", }, { title: "Forks", src: - "https://img.shields.io/github/forks/pypeclub/pype?style=social&labelColor=303846", - href: "https://github.com/pypeclub/pype", + "https://img.shields.io/github/forks/pypeclub/openpype?labelColor=303846", + href: "https://github.com/pypeclub/openpype", }, { title: "Discord", src: - "https://img.shields.io/discord/517362899170230292?label=discord&logo=discord&logoColor=white&labelColor=303846", + "https://img.shields.io/discord/517362899170230292?label=discord&logo=discord&logoColor=white&labelColor=303846", href: "https://discord.gg/sFNPWXG", }, ], diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 29b81e973f..791b309bbc 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -129,6 +129,21 @@ const studios = [ title: "Moonrock Animation Studio", image: "/img/moonrock_logo.png", infoLink: "https://www.moonrock.eu/", + }, + { + title: "Lumine Studio", + image: "/img/LUMINE_LogoMaster_black_2k.png", + infoLink: "https://www.luminestudio.com/", + }, + { + title: "Overmind Studios", + image: "/img/OMS_logo_black_color.png", + infoLink: "https://www.overmind-studios.de/", + }, + { + title: "Ember Light", + image: "/img/EmberLight_black.png", + infoLink: "https://emberlight.se/", } ]; @@ -275,107 +290,121 @@ function Home() { @@ -409,7 +438,7 @@ function Home() { {studios && studios.length && (
-

Studios using openPYPE

+

Studios using openPype

{studios.map((props, idx) => ( diff --git a/website/static/img/EmberLight_black.png b/website/static/img/EmberLight_black.png new file mode 100644 index 0000000000..e8a9f95d06 Binary files /dev/null and b/website/static/img/EmberLight_black.png differ diff --git a/website/static/img/Ember_Light.jpg b/website/static/img/Ember_Light.jpg new file mode 100644 index 0000000000..9081bc7cea Binary files /dev/null and b/website/static/img/Ember_Light.jpg differ diff --git a/website/static/img/LUMINE_LogoMaster_black_2k.png b/website/static/img/LUMINE_LogoMaster_black_2k.png new file mode 100644 index 0000000000..37ef01486d Binary files /dev/null and b/website/static/img/LUMINE_LogoMaster_black_2k.png differ diff --git a/website/static/img/OMS_logo_black_color.png b/website/static/img/OMS_logo_black_color.png new file mode 100644 index 0000000000..9046927e32 Binary files /dev/null and b/website/static/img/OMS_logo_black_color.png differ diff --git a/website/static/img/app_aquarium.png b/website/static/img/app_aquarium.png new file mode 100644 index 0000000000..1ff0acb8aa Binary files /dev/null and b/website/static/img/app_aquarium.png differ diff --git a/website/static/img/app_flame.png b/website/static/img/app_flame.png new file mode 100644 index 0000000000..ba9b69e45f Binary files /dev/null and b/website/static/img/app_flame.png differ diff --git a/website/static/img/app_kitsu.png b/website/static/img/app_kitsu.png new file mode 100644 index 0000000000..b4e3d00ebd Binary files /dev/null and b/website/static/img/app_kitsu.png differ diff --git a/website/static/img/app_shotgrid.png b/website/static/img/app_shotgrid.png new file mode 100644 index 0000000000..102e70b049 Binary files /dev/null and b/website/static/img/app_shotgrid.png differ