diff --git a/client/ayon_core/addon/base.py b/client/ayon_core/addon/base.py index 3d028dba07..21b1193b07 100644 --- a/client/ayon_core/addon/base.py +++ b/client/ayon_core/addon/base.py @@ -50,7 +50,7 @@ IGNORED_MODULES_IN_AYON = set() # When addon was moved from ayon-core codebase # - this is used to log the missing addon MOVED_ADDON_MILESTONE_VERSIONS = { - "applications": VersionInfo(2, 0, 0), + "applications": VersionInfo(0, 2, 0), } # Inherit from `object` for Python 2 hosts diff --git a/client/ayon_core/hooks/pre_ocio_hook.py b/client/ayon_core/hooks/pre_ocio_hook.py index 0817afec71..6c30b267bc 100644 --- a/client/ayon_core/hooks/pre_ocio_hook.py +++ b/client/ayon_core/hooks/pre_ocio_hook.py @@ -1,7 +1,7 @@ from ayon_applications import PreLaunchHook -from ayon_core.pipeline.colorspace import get_imageio_config -from ayon_core.pipeline.template_data import get_template_data_with_names +from ayon_core.pipeline.colorspace import get_imageio_config_preset +from ayon_core.pipeline.template_data import get_template_data class OCIOEnvHook(PreLaunchHook): @@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook): def execute(self): """Hook entry method.""" - template_data = get_template_data_with_names( - project_name=self.data["project_name"], - folder_path=self.data["folder_path"], - task_name=self.data["task_name"], + folder_entity = self.data["folder_entity"] + + template_data = get_template_data( + self.data["project_entity"], + folder_entity=folder_entity, + task_entity=self.data["task_entity"], host_name=self.host_name, - settings=self.data["project_settings"] + settings=self.data["project_settings"], ) - config_data = get_imageio_config( - project_name=self.data["project_name"], - host_name=self.host_name, - project_settings=self.data["project_settings"], - anatomy_data=template_data, + config_data = get_imageio_config_preset( + self.data["project_name"], + self.data["folder_path"], + self.data["task_name"], + self.host_name, anatomy=self.data["anatomy"], + project_settings=self.data["project_settings"], + template_data=template_data, env=self.launch_context.env, + folder_id=folder_entity["id"], ) - if config_data: - ocio_path = config_data["path"] - - if self.host_name in ["nuke", "hiero"]: - ocio_path = ocio_path.replace("\\", "/") - - self.log.info( - f"Setting OCIO environment to config path: {ocio_path}") - - self.launch_context.env["OCIO"] = ocio_path - else: + if not config_data: self.log.debug("OCIO not set or enabled") + return + + ocio_path = config_data["path"] + + if self.host_name in ["nuke", "hiero"]: + ocio_path = ocio_path.replace("\\", "/") + + self.log.info( + f"Setting OCIO environment to config path: {ocio_path}") + + self.launch_context.env["OCIO"] = ocio_path diff --git a/client/ayon_core/hosts/aftereffects/api/launch_logic.py b/client/ayon_core/hosts/aftereffects/api/launch_logic.py index 5a23f2cb35..da6887668a 100644 --- a/client/ayon_core/hosts/aftereffects/api/launch_logic.py +++ b/client/ayon_core/hosts/aftereffects/api/launch_logic.py @@ -60,7 +60,7 @@ def main(*subprocess_args): ) ) - elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): + elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True): save = False if os.getenv("WORKFILES_SAVE_AS"): save = True diff --git a/client/ayon_core/hosts/aftereffects/api/pipeline.py b/client/ayon_core/hosts/aftereffects/api/pipeline.py index 105fee64b9..2239040f09 100644 --- a/client/ayon_core/hosts/aftereffects/api/pipeline.py +++ b/client/ayon_core/hosts/aftereffects/api/pipeline.py @@ -8,14 
+8,11 @@ from ayon_core.lib import Logger, register_event_callback from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, + register_workfile_build_plugin_path, AVALON_CONTAINER_ID, AVALON_INSTANCE_ID, AYON_INSTANCE_ID, ) -from ayon_core.hosts.aftereffects.api.workfile_template_builder import ( - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin -) from ayon_core.pipeline.load import any_outdated_containers import ayon_core.hosts.aftereffects @@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): @@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) register_event_callback("application.launched", application_launch) @@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): item["id"] = "publish_context" self.stub.imprint(item["id"], item) - def get_workfile_build_placeholder_plugins(self): - return [ - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin - ] - # created instances section def list_instances(self): """List all created instances from current workfile which diff --git a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py index aa2f36e8aa..99d5bbb938 100644 --- a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py @@ -1,6 +1,7 @@ import os.path import uuid import shutil +from abc import abstractmethod from ayon_core.pipeline import registered_host from ayon_core.tools.workfile_template_build import ( @@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import ( from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin + PlaceholderItem ) from ayon_core.hosts.aftereffects.api import get_stub -from ayon_core.hosts.aftereffects.api.lib import set_settings PLACEHOLDER_SET = "PLACEHOLDERS_SET" PLACEHOLDER_ID = "openpype.placeholder" @@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder): class AEPlaceholderPlugin(PlaceholderPlugin): """Contains generic methods for all PlaceholderPlugins.""" + @abstractmethod + def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem: + pass + def collect_placeholders(self): """Collect info from file metadata about created placeholders. 
@@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin): if item.get("plugin_identifier") != self.identifier: continue - if isinstance(self, AEPlaceholderLoadPlugin): - item = LoadPlaceholderItem(item["uuid"], - item["data"], - self) - elif isinstance(self, AEPlaceholderCreatePlugin): - item = CreatePlaceholderItem(item["uuid"], - item["data"], - self) - else: - raise NotImplementedError(f"Not implemented for {type(self)}") - + item = self._create_placeholder_item(item) output.append(item) return output @@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin): stub.imprint(item_id, container_data) -class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin): - """Adds Create placeholder. - - This adds composition and runs Create - """ - identifier = "aftereffects.create" - label = "AfterEffects create" - - def create_placeholder(self, placeholder_data): - stub = get_stub() - name = "CREATEPLACEHOLDER" - item_id = stub.add_item(name, "COMP") - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Replace 'placeholder' with publishable instance. - - Renames prepared composition name, creates publishable instance, sets - frame/duration settings according to DB. - """ - pre_create_data = {"use_selection": True} - item_id, item = self._get_item(placeholder) - get_stub().select_items([item_id]) - self.populate_create_placeholder(placeholder, pre_create_data) - - # apply settings for populated composition - item_id, metadata_item = self._get_item(placeholder) - set_settings(True, True, [item_id]) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - -class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin): - identifier = "aftereffects.load" - label = "AfterEffects load" - - def create_placeholder(self, placeholder_data): - """Creates AE's Placeholder item in Project items list. - - Sets dummy resolution/duration/fps settings, will be replaced when - populated. - """ - stub = get_stub() - name = "LOADERPLACEHOLDER" - item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Use Openpype Loader from `placeholder` to create new FootageItems - - New FootageItems are created, files are imported. 
- """ - self.populate_load_placeholder(placeholder) - errors = placeholder.get_errors() - stub = get_stub() - if errors: - stub.print_msg("\n".join(errors)) - else: - if not placeholder.data["keep_placeholder"]: - metadata = stub.get_metadata() - for item in metadata: - if not item.get("is_placeholder"): - continue - scene_identifier = item.get("uuid") - if (scene_identifier and - scene_identifier == placeholder.scene_identifier): - stub.delete_item(item["members"][0]) - stub.remove_instance(placeholder.scene_identifier, metadata) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def load_succeed(self, placeholder, container): - placeholder_item_id, _ = self._get_item(placeholder) - item_id = container.id - get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) - - def build_workfile_template(*args, **kwargs): builder = AETemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py index c28042b6ae..ebd4b8f944 100644 --- a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py @@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance): class CollectAERender(publish.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.405 + order = pyblish.api.CollectorOrder + 0.100 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] @@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender): if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..c7927f176f --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,49 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api.lib import set_settings +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin, + PlaceholderCreateMixin): + """Adds Create placeholder. + + This adds composition and runs Create + """ + identifier = "aftereffects.create" + label = "AfterEffects create" + + def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem: + return CreatePlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + stub = get_stub() + name = "CREATEPLACEHOLDER" + item_id = stub.add_item(name, "COMP") + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Replace 'placeholder' with publishable instance. + + Renames prepared composition name, creates publishable instance, sets + frame/duration settings according to DB. 
+ """ + pre_create_data = {"use_selection": True} + item_id, item = self._get_item(placeholder) + get_stub().select_items([item_id]) + self.populate_create_placeholder(placeholder, pre_create_data) + + # apply settings for populated composition + item_id, metadata_item = self._get_item(placeholder) + set_settings(True, True, [item_id]) + + def get_placeholder_options(self, options=None): + return self.get_create_plugin_options(options) diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..7f7e4f49ce --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,60 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "aftereffects.load" + label = "AfterEffects load" + + def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem: + return LoadPlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + """Creates AE's Placeholder item in Project items list. + + Sets dummy resolution/duration/fps settings, will be replaced when + populated. + """ + stub = get_stub() + name = "LOADERPLACEHOLDER" + item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Use Openpype Loader from `placeholder` to create new FootageItems + + New FootageItems are created, files are imported. + """ + self.populate_load_placeholder(placeholder) + errors = placeholder.get_errors() + stub = get_stub() + if errors: + stub.print_msg("\n".join(errors)) + else: + if not placeholder.data["keep_placeholder"]: + metadata = stub.get_metadata() + for item in metadata: + if not item.get("is_placeholder"): + continue + scene_identifier = item.get("uuid") + if (scene_identifier and + scene_identifier == placeholder.scene_identifier): + stub.delete_item(item["members"][0]) + stub.remove_instance(placeholder.scene_identifier, metadata) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + placeholder_item_id, _ = self._get_item(placeholder) + item_id = container.id + get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) diff --git a/client/ayon_core/hosts/blender/api/lib.py b/client/ayon_core/hosts/blender/api/lib.py index 458a275b51..32137f0fcd 100644 --- a/client/ayon_core/hosts/blender/api/lib.py +++ b/client/ayon_core/hosts/blender/api/lib.py @@ -33,7 +33,7 @@ def load_scripts(paths): if register: try: register() - except: + except: # noqa E722 traceback.print_exc() else: print("\nWarning! 
'%s' has no register function, " @@ -45,7 +45,7 @@ def load_scripts(paths): if unregister: try: unregister() - except: + except: # noqa E722 traceback.print_exc() def test_reload(mod): @@ -57,7 +57,7 @@ def load_scripts(paths): try: return importlib.reload(mod) - except: + except: # noqa E722 traceback.print_exc() def test_register(mod): diff --git a/client/ayon_core/hosts/blender/api/plugin.py b/client/ayon_core/hosts/blender/api/plugin.py index 6c9bfb6569..4a13d16805 100644 --- a/client/ayon_core/hosts/blender/api/plugin.py +++ b/client/ayon_core/hosts/blender/api/plugin.py @@ -143,13 +143,19 @@ def deselect_all(): if obj.mode != 'OBJECT': modes.append((obj, obj.mode)) bpy.context.view_layer.objects.active = obj - bpy.ops.object.mode_set(mode='OBJECT') + context_override = create_blender_context(active=obj) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode='OBJECT') - bpy.ops.object.select_all(action='DESELECT') + context_override = create_blender_context() + with bpy.context.temp_override(**context_override): + bpy.ops.object.select_all(action='DESELECT') for p in modes: bpy.context.view_layer.objects.active = p[0] - bpy.ops.object.mode_set(mode=p[1]) + context_override = create_blender_context(active=p[0]) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode=p[1]) bpy.context.view_layer.objects.active = active diff --git a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py index 6178578081..a49bb40d9a 100644 --- a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py @@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader): def _process(self, libpath, asset_group, group_name): plugin.deselect_all() - bpy.ops.wm.alembic_import(filepath=libpath) + # Force the creation of the transform cache even if the camera + # doesn't have an animation. We use the cache to update the camera. + bpy.ops.wm.alembic_import( + filepath=libpath, always_add_cache_reader=True) objects = lib.get_selection() @@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader): self.log.info("Library already loaded, not updating...") return - mat = asset_group.matrix_basis.copy() + for obj in asset_group.children: + found = False + for constraint in obj.constraints: + if constraint.type == "TRANSFORM_CACHE": + constraint.cache_file.filepath = libpath.as_posix() + found = True + break + if not found: + # This is to keep compatibility with cameras loaded with + # the old loader + # Create a new constraint for the cache file + constraint = obj.constraints.new("TRANSFORM_CACHE") + bpy.ops.cachefile.open(filepath=libpath.as_posix()) + constraint.cache_file = bpy.data.cache_files[-1] + constraint.cache_file.scale = 1.0 - self._remove(asset_group) - self._process(str(libpath), asset_group, object_name) + # This is a workaround to set the object path. Blender doesn't + # load the list of object paths until the object is evaluated. + # This is a hack to force the object to be evaluated. + # The modifier doesn't need to be removed because camera + # objects don't have modifiers. 
+ obj.modifiers.new( + name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE') + bpy.context.evaluated_depsgraph_get() - asset_group.matrix_basis = mat + constraint.object_path = ( + constraint.cache_file.object_paths[0].path) metadata["libpath"] = str(libpath) metadata["representation"] = repre_entity["id"] diff --git a/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py index 094f88fd8c..6590be515c 100644 --- a/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py +++ b/client/ayon_core/hosts/blender/plugins/publish/extract_abc.py @@ -2,6 +2,7 @@ import os import bpy +from ayon_core.lib import BoolDef from ayon_core.pipeline import publish from ayon_core.hosts.blender.api import plugin @@ -17,6 +18,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin): if not self.is_active(instance.data): return + attr_values = self.get_attr_values_from_data(instance.data) + # Define extract output file path stagingdir = self.staging_dir(instance) folder_name = instance.data["folderEntity"]["name"] @@ -46,7 +49,8 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin): bpy.ops.wm.alembic_export( filepath=filepath, selected=True, - flatten=False + flatten=False, + subdiv_schema=attr_values.get("subdiv_schema", False) ) plugin.deselect_all() @@ -65,6 +69,21 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin): self.log.debug("Extracted instance '%s' to: %s", instance.name, representation) + @classmethod + def get_attribute_defs(cls): + return [ + BoolDef( + "subdiv_schema", + label="Alembic Mesh Subdiv Schema", + tooltip="Export Meshes using Alembic's subdivision schema.\n" + "Enabling this includes creases with the export but " + "excludes the mesh's normals.\n" + "Enabling this usually results in a smaller file size " + "due to the lack of normals.", + default=False + ) + ] + class ExtractModelABC(ExtractABC): """Extract model as ABC.""" diff --git a/client/ayon_core/hosts/fusion/api/action.py b/client/ayon_core/hosts/fusion/api/action.py index 1643f1ce03..a0c6aafcb5 100644 --- a/client/ayon_core/hosts/fusion/api/action.py +++ b/client/ayon_core/hosts/fusion/api/action.py @@ -58,3 +58,55 @@ class SelectInvalidAction(pyblish.api.Action): self.log.info( "Selecting invalid tools: %s" % ", ".join(sorted(names)) ) + + +class SelectToolAction(pyblish.api.Action): + """Select invalid output tool in Fusion when plug-in failed. + + """ + + label = "Select saver" + on = "failed" # This action is only available on a failed plug-in + icon = "search" # Icon from Font Awesome + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context( + context, + plugin=plugin, + ) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes..") + tools = [] + for instance in errored_instances: + + tool = instance.data.get("tool") + if tool is not None: + tools.append(tool) + else: + self.log.warning( + "Plug-in reported the instance as invalid, " + f"but it has no saver for instance {instance.name}."
+ ) + + if not tools: + # Assume relevant comp is current comp and clear selection + self.log.info("No invalid tools found.") + comp = get_current_comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + return + + # Assume a single comp + first_tool = tools[0] + comp = first_tool.Comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + names = set() + for tool in tools: + flow.Select(tool, True) + comp.SetActiveTool(tool) + names.add(tool.Name) + self.log.info( + "Selecting invalid tools: %s" % ", ".join(sorted(names)) + ) diff --git a/client/ayon_core/hosts/fusion/api/lib.py b/client/ayon_core/hosts/fusion/api/lib.py index 08722463e1..7f7d20010d 100644 --- a/client/ayon_core/hosts/fusion/api/lib.py +++ b/client/ayon_core/hosts/fusion/api/lib.py @@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False): def _on_repair(): attributes = dict() for key, comp_key, _label in validations: - value = folder_value[key] + value = folder_attributes[key] comp_key_full = "Comp.FrameFormat.{}".format(comp_key) attributes[comp_key_full] = value comp.SetPrefs(attributes) diff --git a/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py b/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py index e70d4b844e..113a1ffe59 100644 --- a/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py +++ b/client/ayon_core/hosts/fusion/hooks/pre_fusion_launch_menu_hook.py @@ -1,5 +1,5 @@ import os -from ayon_core.lib import PreLaunchHook +from ayon_applications import PreLaunchHook from ayon_core.hosts.fusion import FUSION_HOST_DIR diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py index 7a2844d5db..9c04e59717 100644 --- a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py @@ -52,7 +52,7 @@ class CollectFusionRender( if product_type not in ["render", "image"]: continue - task_name = context.data["task"] + task_name = inst.data["task"] tool = inst.data["transientData"]["tool"] instance_families = inst.data.get("families", []) @@ -115,6 +115,7 @@ class CollectFusionRender( if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git a/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py new file mode 100644 index 0000000000..3aa6fb452f --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +"""Validate if instance context is the same as publish context.""" + +import pyblish.api +from ayon_core.hosts.fusion.api.action import SelectToolAction +from ayon_core.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin +) + + +class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validator to check if instance context matches context of publish. + + When working in per-shot style you always publish data in context of + current asset (shot). This validator checks if this is so. It is optional + so it can be disabled when needed. 
+ """ + # Similar to maya and houdini-equivalent `ValidateInstanceInContext` + + order = ValidateContentsOrder + label = "Instance in same Context" + optional = True + hosts = ["fusion"] + actions = [SelectToolAction, RepairAction] + + def process(self, instance): + if not self.is_active(instance.data): + return + + instance_context = self.get_context(instance.data) + context = self.get_context(instance.context.data) + if instance_context != context: + context_label = "{} > {}".format(*context) + instance_label = "{} > {}".format(*instance_context) + + raise PublishValidationError( + message=( + "Instance '{}' publishes to a different asset than the " + "current context: {}. Current context: {}".format( + instance.name, instance_label, context_label + ) + ), + description=( + "## Publishing to a different asset\n" + "There are publish instances present which are publishing " + "into a different asset than your current context.\n\n" + "Usually this is not what you want but there can be cases " + "where you might want to publish into another asset or " + "shot. If that's the case you can disable the validation " + "on the instance to ignore it." + ) + ) + + @classmethod + def repair(cls, instance): + + create_context = instance.context.data["create_context"] + instance_id = instance.data.get("instance_id") + created_instance = create_context.get_instance_by_id( + instance_id + ) + if created_instance is None: + raise RuntimeError( + f"No CreatedInstances found with id '{instance_id}' " + f"in {create_context.instances_by_id}" + ) + + context_asset, context_task = cls.get_context(instance.context.data) + created_instance["folderPath"] = context_asset + created_instance["task"] = context_task + create_context.save_changes() + + @staticmethod + def get_context(data): + """Return asset, task from publishing context data""" + return data["folderPath"], data["task"] diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py index 156e2ac6ba..c63eb114e5 100644 --- a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py @@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender): outputFormat=info[1], outputStartFrame=info[3], leadingZeros=info[2], - ignoreFrameHandleCheck=True + ignoreFrameHandleCheck=True, + # TODO: inst is not available, must be determined; fix when + # reworking to Publisher + # deadline=inst.data.get("deadline") ) render_instance.context = context diff --git a/client/ayon_core/hosts/hiero/api/events.py b/client/ayon_core/hosts/hiero/api/events.py index 304605e24e..663004abd2 100644 --- a/client/ayon_core/hosts/hiero/api/events.py +++ b/client/ayon_core/hosts/hiero/api/events.py @@ -8,6 +8,7 @@ from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, before_project_save, + apply_colorspace_project ) from .tags import add_tags_to_workfile from .menu import update_menu_task_label @@ -44,6 +45,8 @@ def afterNewProjectCreated(event): # reset workfiles startup not to open any more in session os.environ["WORKFILES_STARTUP"] = "0" + apply_colorspace_project() + def beforeProjectLoad(event): log.info("before project load event...") @@ -122,6 +125,7 @@ def register_hiero_events(): except RuntimeError: pass + def register_events(): """ Adding all callbacks.
diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py index 8682ff7780..456a68f125 100644 --- a/client/ayon_core/hosts/hiero/api/lib.py +++ b/client/ayon_core/hosts/hiero/api/lib.py @@ -11,7 +11,6 @@ import warnings import json import ast import secrets -import shutil import hiero from qtpy import QtWidgets, QtCore @@ -36,9 +35,6 @@ from .constants import ( DEFAULT_SEQUENCE_NAME, DEFAULT_BIN_NAME ) -from ayon_core.pipeline.colorspace import ( - get_imageio_config -) class _CTX: @@ -105,9 +101,9 @@ def flatten(list_): def get_current_project(remove_untitled=False): - projects = flatten(hiero.core.projects()) + projects = hiero.core.projects() if not remove_untitled: - return next(iter(projects)) + return projects[0] # if remove_untitled for proj in projects: @@ -1050,30 +1046,84 @@ def _set_hrox_project_knobs(doc, **knobs): def apply_colorspace_project(): - project_name = get_current_project_name() - # get path the the active projects - project = get_current_project(remove_untitled=True) - current_file = project.path() - - # close the active project - project.close() + """Apply colorspaces from settings. + Due to not being able to set the project settings through the Python API, + we need to use some dubious code to find the widgets and set them. It is + possible to set the project settings without traversing through the widgets + but it involves reading the hrox files from disk with XML, so no in-memory + support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa + for more details. + """ # get presets for hiero + project_name = get_current_project_name() imageio = get_project_settings(project_name)["hiero"]["imageio"] presets = imageio.get("workfile") + # Open Project Settings UI. + for act in hiero.ui.registeredActions(): + if act.objectName() == "foundry.project.settings": + act.trigger() + + # Find widgets from their sibling label. + labels = { + "Working Space:": "workingSpace", + "Viewer:": "viewerLut", + "Thumbnails:": "thumbnailLut", + "Monitor Out:": "monitorOutLut", + "8 Bit Files:": "eightBitLut", + "16 Bit Files:": "sixteenBitLut", + "Log Files:": "logLut", + "Floating Point Files:": "floatLut" + } + widgets = {x: None for x in labels.values()} + + def _recursive_children(widget, labels, widgets): + children = widget.children() + for count, child in enumerate(children): + if isinstance(child, QtWidgets.QLabel): + if child.text() in labels.keys(): + widgets[labels[child.text()]] = children[count + 1] + _recursive_children(child, labels, widgets) + + app = QtWidgets.QApplication.instance() + title = "Project Settings" + for widget in app.topLevelWidgets(): + if isinstance(widget, QtWidgets.QMainWindow): + if widget.windowTitle() != title: + continue + _recursive_children(widget, labels, widgets) + widget.close() + + msg = "Setting value \"{}\" is not a valid option for \"{}\"" + for key, widget in widgets.items(): + options = [widget.itemText(i) for i in range(widget.count())] + setting_value = presets[key] + assert setting_value in options, msg.format(setting_value, key) + widget.setCurrentText(presets[key]) + + # This code block is for setting up project colorspaces for files on disk. + # Due to not having Python API access to set the project settings, the + # Foundry recommended way is to modify the hrox files on disk with XML.
See + this forum thread for more details; + https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa + ''' # backward compatibility layer # TODO: remove this after some time - config_data = get_imageio_config( - project_name=get_current_project_name(), - host_name="hiero" - ) + config_data = get_current_context_imageio_config_preset() if config_data: presets.update({ "ocioConfigName": "custom" }) + # get path to the active projects + project = get_current_project() + current_file = project.path() + + msg = "The project needs to be saved to disk to apply colorspace settings." + assert current_file, msg + # save the workfile as subversion "comment:_colorspaceChange" split_current_file = os.path.splitext(current_file) copy_current_file = current_file @@ -1116,6 +1166,7 @@ def apply_colorspace_project(): # open the file as current project hiero.core.openProject(copy_current_file) + ''' def apply_colorspace_clips(): @@ -1125,10 +1176,8 @@ def apply_colorspace_clips(): # get presets for hiero imageio = get_project_settings(project_name)["hiero"]["imageio"] - from pprint import pprint presets = imageio.get("regexInputs", {}).get("inputs", {}) - pprint(presets) for clip in clips: clip_media_source_path = clip.mediaSource().firstpath() clip_name = clip.name() diff --git a/client/ayon_core/hosts/hiero/api/tags.py b/client/ayon_core/hosts/hiero/api/tags.py index 5abfee75d0..d4acb23493 100644 --- a/client/ayon_core/hosts/hiero/api/tags.py +++ b/client/ayon_core/hosts/hiero/api/tags.py @@ -144,7 +144,7 @@ def add_tags_to_workfile(): # Get project task types. project_name = get_current_project_name() project_entity = ayon_api.get_project(project_name) - task_types = project_entity["taskType"] + task_types = project_entity["taskTypes"] nks_pres_tags["[Tasks]"] = {} log.debug("__ tasks: {}".format(task_types)) for task_type in task_types: diff --git a/client/ayon_core/hosts/hiero/api/workio.py b/client/ayon_core/hosts/hiero/api/workio.py index 4c2416ca38..6e8fc20172 100644 --- a/client/ayon_core/hosts/hiero/api/workio.py +++ b/client/ayon_core/hosts/hiero/api/workio.py @@ -51,13 +51,12 @@ def open_file(filepath): project = hiero.core.projects()[-1] - # open project file - hiero.core.openProject(filepath.replace(os.path.sep, "/")) - - # close previous project - project.close() - - + # Close previous project if it's different from the current project. + filepath = filepath.replace(os.path.sep, "/") + if project.path().replace(os.path.sep, "/") != filepath: + # open project file + hiero.core.openProject(filepath) + project.close() return True diff --git a/client/ayon_core/hosts/houdini/api/lib.py b/client/ayon_core/hosts/houdini/api/lib.py index 63f51b2423..51a591d942 100644 --- a/client/ayon_core/hosts/houdini/api/lib.py +++ b/client/ayon_core/hosts/houdini/api/lib.py @@ -833,6 +833,43 @@ def get_current_context_template_data_with_folder_attrs(): return template_data +def set_review_color_space(opengl_node, review_color_space="", log=None): + """Set ociocolorspace parameter for the given OpenGL node. + + Set `ociocolorspace` parameter of the given OpenGL node + to the given review_color_space value. + If review_color_space is empty, a default colorspace corresponding to + the display & view of the current Houdini session will be used. + + Args: + opengl_node (hou.Node): ROP node to set its ociocolorspace parm. + review_color_space (str): Colorspace value for ociocolorspace parm. + log (logging.Logger): Logger to log to.
+ """ + + if log is None: + log = logging.getLogger(__name__) + + # Set Color Correction parameter to OpenColorIO + colorcorrect_parm = opengl_node.parm("colorcorrect") + if colorcorrect_parm.eval() != 2: + colorcorrect_parm.set(2) + log.debug( + "'Color Correction' parm on '{}' has been set to" + " 'OpenColorIO'".format(opengl_node.path()) + ) + + opengl_node.setParms( + {"ociocolorspace": review_color_space} + ) + + log.debug( + "'OCIO Colorspace' parm on '{}' has been set to " + "the view color space '{}'" + .format(opengl_node, review_color_space) + ) + + def get_context_var_changes(): """get context var changes.""" diff --git a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py new file mode 100644 index 0000000000..7d41979600 --- /dev/null +++ b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py @@ -0,0 +1,64 @@ +from ayon_applications import PreLaunchHook, LaunchTypes + + +class SetDefaultDisplayView(PreLaunchHook): + """Set default view and default display for houdini via OpenColorIO. + + Houdini's defaultDisplay and defaultView are set by + setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS' + environment variables respectively. + + More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up + """ + + app_groups = {"houdini"} + launch_types = {LaunchTypes.local} + + def execute(self): + + OCIO = self.launch_context.env.get("OCIO") + + # This is a cheap way to skip this hook if either global color + # management or houdini color management was disabled because the + # OCIO var would be set by the global OCIOEnvHook + if not OCIO: + return + + # workfile settings added in '0.2.13' + houdini_color_settings = \ + self.data["project_settings"]["houdini"]["imageio"].get("workfile") + + if not houdini_color_settings: + self.log.info("Hook 'SetDefaultDisplayView' requires Houdini " + "addon version >= '0.2.13'") + return + + if not houdini_color_settings["enabled"]: + self.log.info( + "Houdini workfile color management is disabled." + ) + return + + # 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked + # as Admins can add them in Ayon env vars or Ayon tools. + + default_display = houdini_color_settings["default_display"] + if default_display: + # get 'OCIO_ACTIVE_DISPLAYS' value if exists. + self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display) + + default_view = houdini_color_settings["default_view"] + if default_view: + # get 'OCIO_ACTIVE_VIEWS' value if exists.
+ self._set_context_env("OCIO_ACTIVE_VIEWS", default_view) + + def _set_context_env(self, env_var, default_value): + env_value = self.launch_context.env.get(env_var, "") + new_value = ":".join( + key for key in [default_value, env_value] if key + ) + self.log.info( + "Setting {} environment to: {}" + .format(env_var, new_value) + ) + self.launch_context.env[env_var] = new_value diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py index f65b54a452..1208cfc1ea 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py @@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator): # Default extension ext = "exr" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): import hou + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] # Remove the active, we are checking the bypass flag of the nodes instance_data.pop("active", None) @@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator): # Add chunk size attribute instance_data["chunkSize"] = 1 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateArnoldRop, self).create( product_name, @@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator): "ar_exr_half_precision": 1 # half precision } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": ass_filepath = \ "{export_dir}{product_name}/{product_name}.$F4.ass".format( export_dir=hou.text.expandString("$HIP/pyblish/ass/"), @@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target), + ] + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = [ EnumDef("image_format", image_format_enum, default=self.ext, - label="Image Format Options") + label="Image Format Options"), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py index e91ddbc0ac..48cf5057ab 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py @@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator): product_type = "karma_rop" icon = "magic" + # Default render target + render_target = "farm" + def create(self, product_name, instance_data, pre_create_data): import hou # noqa + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "karma"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateKarmaROP, self).create( product_name, @@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), + attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default="exr", @@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator): decimals=0), BoolDef("cam_res", label="Camera Resolution", - default=False) + default=False), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py index 64ecf428e9..05b4431aba 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py @@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator): product_type = "mantra_rop" icon = "magic" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): import hou # noqa + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "ifd"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateMantraROP, self).create( product_name, @@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator): "vm_picture": filepath, } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": ifd_filepath = \ "{export_dir}{product_name}/{product_name}.$F4.ifd".format( export_dir=hou.text.expandString("$HIP/pyblish/ifd/"), @@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default="exr", @@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator): label="Override Camera Resolution", tooltip="Override the current camera " "resolution, recommended for IPR.", - default=False) + default=False), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py index 1cd239e929..3ecb09ee9b 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py @@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator): ext = "exr" multi_layered_mode = "No Multi-Layered EXR File" - # Default to split export and render jobs - split_render = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "Redshift_ROP"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateRedshiftROP, self).create( product_name, @@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator): rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs" parms["RS_archive_file"] = rs_filepath - if pre_create_data.get("split_render", self.split_render): + if pre_create_data.get("render_target") == "farm_split": parms["RS_archive_enable"] = 1 instance_node.setParms(parms) @@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator): return super(CreateRedshiftROP, self).remove_instances(instances) + def get_instance_attr_defs(self): + """get instance attribute definitions. + + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + def get_pre_create_attr_defs(self): - attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + image_format_enum = [ "exr", "tif", "jpg", "png", ] + multi_layered_mode = [ "No Multi-Layered EXR File", "Full Multi-Layered EXR File" ] - - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("split_render", - label="Split export and render jobs", - default=self.split_render), + attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + attrs += [ EnumDef("image_format", image_format_enum, default=self.ext, @@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator): EnumDef("multi_layered_mode", multi_layered_mode, default=self.multi_layered_mode, - label="Multi-Layered EXR") + label="Multi-Layered EXR"), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py index 18f7ce498d..f5e4d4ce64 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_review.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Creator plugin for creating openGL reviews.""" -from ayon_core.hosts.houdini.api import plugin +from ayon_core.hosts.houdini.api import lib, plugin from ayon_core.lib import EnumDef, BoolDef, NumberDef import os @@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator): label = "Review" product_type = "review" icon = "video-camera" + review_color_space = "" + + def apply_settings(self, project_settings): + super(CreateReview, self).apply_settings(project_settings) + # workfile settings added in '0.2.13' + color_settings = project_settings["houdini"]["imageio"].get( + "workfile", {} + ) + if color_settings.get("enabled"): + self.review_color_space = color_settings.get("review_color_space") def create(self, product_name, instance_data, pre_create_data): @@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator): instance_node.setParms(parms) - # Set OCIO Colorspace to the default output colorspace + # Set OCIO Colorspace to the default colorspace # if there's OCIO if os.getenv("OCIO"): - self.set_colorcorrect_to_default_view_space(instance_node) + # Fall to the default value if cls.review_color_space is empty. + if not self.review_color_space: + # cls.review_color_space is an empty string + # when the imageio/workfile setting is disabled or + # when the Review colorspace setting is empty. 
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa + self.review_color_space = get_default_display_view_colorspace() + + lib.set_review_color_space(instance_node, + self.review_color_space, + self.log) to_lock = ["id", "productType"] @@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator): minimum=0.0001, decimals=3) ] - - def set_colorcorrect_to_default_view_space(self, - instance_node): - """Set ociocolorspace to the default output space.""" - from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa - - # set Color Correction parameter to OpenColorIO - instance_node.setParms({"colorcorrect": 2}) - - # Get default view space for ociocolorspace parm. - default_view_space = get_default_display_view_colorspace() - instance_node.setParms( - {"ociocolorspace": default_view_space} - ) - - self.log.debug( - "'OCIO Colorspace' parm on '{}' has been set to " - "the default view color space '{}'" - .format(instance_node, default_view_space) - ) diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py index 5ed9e848a7..9e4633e745 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py @@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator): icon = "magic" ext = "exr" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "vray_renderer"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateVrayROP, self).create( product_name, @@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator): "SettingsEXR_bits_per_channel": "16" # half precision } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": scene_filepath = \ "{export_dir}{product_name}/{product_name}.$F4.vrscene".format( export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"), @@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator): return super(CreateVrayROP, self).remove_instances(instances) + def get_instance_attr_defs(self): + """get instance attribute definitions. + + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + def get_pre_create_attr_defs(self): - attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default=self.ext, @@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator): "if enabled", default=False) ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py index a958509e25..40a607e81a 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py @@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator): # write workfile information to context container. op_ctx = hou.node(CONTEXT_CONTAINER) if not op_ctx: - op_ctx = self.create_context_node() + op_ctx = self.host.create_context_node() workfile_data = {"workfile": current_instance.data_to_store()} imprint(op_ctx, workfile_data) diff --git a/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py index 1bb9043cd0..5f04781501 100644 --- a/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py +++ b/client/ayon_core/hosts/houdini/plugins/load/load_alembic.py @@ -45,33 +45,11 @@ class AbcLoader(load.LoaderPlugin): alembic = container.createNode("alembic", node_name=node_name) alembic.setParms({"fileName": file_path}) - # Add unpack node - unpack_name = "unpack_{}".format(name) - unpack = container.createNode("unpack", node_name=unpack_name) - unpack.setInput(0, alembic) - unpack.setParms({"transfer_attributes": "path"}) + # Position nodes nicely + container.moveToGoodPosition() + container.layoutChildren() - # Add normal to points - # Order of menu ['point', 'vertex', 'prim', 'detail'] - normal_name = "normal_{}".format(name) - normal_node = container.createNode("normal", node_name=normal_name) - normal_node.setParms({"type": 0}) - - normal_node.setInput(0, unpack) - - null = container.createNode("null", node_name="OUT") - null.setInput(0, normal_node) - - # Ensure display flag is on the Alembic input node and not on the OUT - # node to optimize "debug" displaying in the viewport. 
- alembic.setDisplayFlag(True) - - # Set new position for unpack node else it gets cluttered - nodes = [container, alembic, unpack, normal_node, null] - for nr, node in enumerate(nodes): - node.setPosition([0, (0 - nr)]) - - self[:] = nodes + nodes = [container, alembic] return pipeline.containerise( node_name, diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py index 7fe38555a3..53a3e52717 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py @@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "ar_picture") render_products = [] - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("ar_ass_export_enable").eval()) - instance.data["splitRender"] = split_render export_prefix = None export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "ar_ass_file", pad_character="0" ) @@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): "": self.generate_expected_files(instance, beauty_product) } + # Assume it's a multipartExr Render. + multipartExr = True + num_aovs = rop.evalParm("ar_aovs") + # TODO: Check the following logic. + # as it always assumes that all AOV are not merged. for index in range(1, num_aovs + 1): # Skip disabled AOVs if not rop.evalParm("ar_enable_aov{}".format(index)): @@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): files_by_aov[label] = self.generate_expected_files(instance, aov_product) + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. 
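# [Aside] A condensed sketch of the multipartExr convention used by the
# Arnold, Mantra, Redshift and V-Ray collectors in this patch: start from
# multipartExr = True and flip it to False as soon as any AOV is written
# out as its own render product. Illustration only, not part of the patch;
# `enabled_aovs` and `expected_files` are hypothetical stand-ins.
def collect_aov_products(files_by_aov, enabled_aovs, expected_files):
    multipart_exr = True
    for label, aov_product in enabled_aovs:
        files_by_aov[label] = expected_files(aov_product)
        # Any separately written AOV means the output is no longer a
        # single multipart EXR.
        multipart_exr = False
    return multipart_exr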
+ instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: {}".format(product)) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py index 040ad68a1a..e931c7bf1b 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py @@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib class CollectDataforCache(pyblish.api.InstancePlugin): """Collect data for caching to Deadline.""" - order = pyblish.api.CollectorOrder + 0.04 + # Run after Collect Frames + order = pyblish.api.CollectorOrder + 0.11 families = ["ass", "pointcache", "mantraifd", "redshiftproxy", "vdbcache"] diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py new file mode 100644 index 0000000000..586aa2da57 --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py @@ -0,0 +1,35 @@ +import pyblish.api + + +class CollectFarmInstances(pyblish.api.InstancePlugin): + """Collect instances for farm render.""" + + order = pyblish.api.CollectorOrder + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + hosts = ["houdini"] + targets = ["local", "remote"] + label = "Collect farm instances" + + def process(self, instance): + + creator_attribute = instance.data["creator_attributes"] + + # Collect Render Target + if creator_attribute.get("render_target") not in { + "farm_split", "farm" + }: + instance.data["farm"] = False + instance.data["splitRender"] = False + self.log.debug("Render on farm is disabled. " + "Skipping farm collecting.") + return + + instance.data["farm"] = True + instance.data["splitRender"] = ( + creator_attribute.get("render_target") == "farm_split" + ) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py index a643ab0d38..b38ebc6e2f 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py @@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin): label = "Collect Frames" families = ["vdbcache", "imagesequence", "ass", "mantraifd", "redshiftproxy", "review", - "bgeo"] + "pointcache"] def process(self, instance): diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py index 78651b0c69..662ed7ae30 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py @@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin): beauty_product) } + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. + # By default karma render is a multipart Exr. 
+ instance.data["multipartExr"] = True + filenames = list(render_products) instance.data["files"] = filenames instance.data["renderProducts"] = colorspace.ARenderProduct() diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py new file mode 100644 index 0000000000..474002e1ee --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py @@ -0,0 +1,137 @@ +import os +import pyblish.api +from ayon_core.pipeline.create import get_product_name +from ayon_core.pipeline.farm.patterning import match_aov_pattern +from ayon_core.pipeline.publish import ( + get_plugin_settings, + apply_plugin_settings_automatically +) + + +class CollectLocalRenderInstances(pyblish.api.InstancePlugin): + """Collect instances for local render. + + Agnostic Local Render Collector. + """ + + # this plugin runs after Collect Render Products + order = pyblish.api.CollectorOrder + 0.12 + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + hosts = ["houdini"] + label = "Collect local render instances" + + use_deadline_aov_filter = False + aov_filter = {"host_name": "houdini", + "value": [".*([Bb]eauty).*"]} + + @classmethod + def apply_settings(cls, project_settings): + # Preserve automatic settings applying logic + settings = get_plugin_settings(plugin=cls, + project_settings=project_settings, + log=cls.log, + category="houdini") + apply_plugin_settings_automatically(cls, settings, logger=cls.log) + + if not cls.use_deadline_aov_filter: + # get aov_filter from collector settings + # and restructure it as match_aov_pattern requires. + cls.aov_filter = { + cls.aov_filter["host_name"]: cls.aov_filter["value"] + } + else: + # get aov_filter from deadline settings + cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"] + cls.aov_filter = { + item["name"]: item["value"] + for item in cls.aov_filter + } + + def process(self, instance): + + if instance.data["farm"]: + self.log.debug("Render on farm is enabled. " + "Skipping local render collecting.") + return + + # Create Instance for each AOV. + context = instance.context + expectedFiles = next(iter(instance.data["expectedFiles"]), {}) + + product_type = "render" # is always render + product_group = get_product_name( + context.data["projectName"], + context.data["taskEntity"]["name"], + context.data["taskEntity"]["taskType"], + context.data["hostName"], + product_type, + instance.data["productName"] + ) + + for aov_name, aov_filepaths in expectedFiles.items(): + product_name = product_group + + if aov_name: + product_name = "{}_{}".format(product_name, aov_name) + + # Create instance for each AOV + aov_instance = context.create_instance(product_name) + + # Prepare Representation for each AOV + aov_filenames = [os.path.basename(path) for path in aov_filepaths] + staging_dir = os.path.dirname(aov_filepaths[0]) + ext = aov_filepaths[0].split(".")[-1] + + # Decide if instance is reviewable + preview = False + if instance.data.get("multipartExr", False): + # Add preview tag because its multipartExr. + preview = True + else: + # Add Preview tag if the AOV matches the filter. + preview = match_aov_pattern( + "houdini", self.aov_filter, aov_filenames[0] + ) + + preview = preview and instance.data.get("review", False) + + # Support Single frame. + # The integrator wants single files to be a single + # filename instead of a list. 
+ # More info: https://github.com/ynput/ayon-core/issues/238 + if len(aov_filenames) == 1: + aov_filenames = aov_filenames[0] + + aov_instance.data.update({ + # 'label': label, + "task": instance.data["task"], + "folderPath": instance.data["folderPath"], + "frameStart": instance.data["frameStartHandle"], + "frameEnd": instance.data["frameEndHandle"], + "productType": product_type, + "family": product_type, + "productName": product_name, + "productGroup": product_group, + "families": ["render.local.hou", "review"], + "instance_node": instance.data["instance_node"], + "representations": [ + { + "stagingDir": staging_dir, + "ext": ext, + "name": ext, + "tags": ["review"] if preview else [], + "files": aov_filenames, + "frameStart": instance.data["frameStartHandle"], + "frameEnd": instance.data["frameEndHandle"] + } + ] + }) + + # Skip integrating original render instance. + # We are not removing it because it's used to trigger the render. + instance.data["integrate"] = False diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py index df9acc4b61..7b247768fc 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py @@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "vm_picture") render_products = [] - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("soho_outputmode").eval()) - instance.data["splitRender"] = split_render export_prefix = None export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "soho_diskfile", pad_character="0" ) @@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): beauty_product) } + # Assume it's a multipartExr Render. + multipartExr = True + + # TODO: This logic doesn't take into considerations + # cryptomatte defined in 'Images > Cryptomatte' aov_numbers = rop.evalParm("vm_numaux") if aov_numbers > 0: # get the filenames of the AOVs @@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. 
+ instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: %s" % product) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py index 55a55bb12a..ce90ae2413 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix") beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix") - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("RS_archive_enable").eval()) - instance.data["splitRender"] = split_render + export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "RS_archive_file", pad_character="0" ) @@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2") if full_exr_mode: # Ignore beauty suffix if full mode is enabled - # As this is what the rop does. + # As this is what the rop does. beauty_suffix = "" + # Assume it's a multipartExr Render. + multipartExr = True + # Default beauty/main layer AOV beauty_product = self.get_render_product_name( prefix=default_prefix, suffix=beauty_suffix @@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): beauty_suffix: self.generate_expected_files(instance, beauty_product) } - + aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode() if aovs_rop: rop = aovs_rop @@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \ not full_exr_mode: - + aov_product = self.get_render_product_name(aov_prefix, aov_suffix) render_products.append(aov_product) files_by_aov[aov_suffix] = self.generate_expected_files(instance, aov_product) # noqa + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. + instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: %s" % product) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py index 9671945b9a..ed2de785a2 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py @@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin): label = "Collect Review Data" # This specific order value is used so that # this plugin runs after CollectRopFrameRange - order = pyblish.api.CollectorOrder + 0.1 + # Also after CollectLocalRenderInstances + order = pyblish.api.CollectorOrder + 0.13 hosts = ["houdini"] families = ["review"] @@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin): ropnode_path = instance.data["instance_node"] ropnode = hou.node(ropnode_path) - camera_path = ropnode.parm("camera").eval() + # Get camera based on the instance_node type. 
+        camera_path = self._get_camera_path(ropnode)
         camera_node = hou.node(camera_path)
         if not camera_node:
             self.log.warning("No valid camera node found on review node: "
@@ -55,3 +57,29 @@
         # Store focal length in `burninDataMembers`
         burnin_members = instance.data.setdefault("burninDataMembers", {})
         burnin_members["focalLength"] = focal_length
+
+    def _get_camera_path(self, ropnode):
+        """Get the camera path associated with the given ROP node.
+
+        This function evaluates the camera parameter according to the
+        type of the given ROP node.
+
+        It can return an empty string when the camera parm itself is
+        empty, i.e. no camera path is set on the node.
+
+        Returns:
+            Union[str, None]: Camera path or None.
+        """
+
+        if ropnode.type().name() in {
+            "opengl", "karma", "ifd", "arnold"
+        }:
+            return ropnode.parm("camera").eval()
+
+        elif ropnode.type().name() == "Redshift_ROP":
+            return ropnode.parm("RS_renderCamera").eval()
+
+        elif ropnode.type().name() == "vray_renderer":
+            return ropnode.parm("render_camera").eval()
+
+        return None
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py
new file mode 100644
index 0000000000..78dc5fe11a
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py
@@ -0,0 +1,22 @@
+import pyblish.api
+
+
+class CollectReviewableInstances(pyblish.api.InstancePlugin):
+    """Collect Reviewable Instances.
+
+    Basically, all instances of the specified families
+    whose creator_attributes["review"] value is enabled.
+    """
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Reviewable Instances"
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+
+        instance.data["review"] = creator_attribute.get("review", False)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
index 62b7dcdd5d..c39b1db103 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
@@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element
-        # Store whether we are splitting the render job in an export + render
-        split_render = rop.parm("render_export_mode").eval() == "2"
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "render_export_filepath", pad_character="0"
             )
@@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                 "": self.generate_expected_files(instance,
                                                  beauty_product)}

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         if instance.data.get("RenderElement", True):
             render_element = self.get_render_element_name(rop, default_prefix)
             if render_element:
@@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                 render_products.append(renderpass)
                 files_by_aov[aov] = self.generate_expected_files(
                     instance, renderpass)
+                # Set to False as soon as we have a separated aov.
+                multipartExr = False
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
index daf30b26ed..7ae476d2b4 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py
@@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
         staging_dir = os.path.dirname(output)
         instance.data["stagingDir"] = staging_dir
-        file_name = os.path.basename(output)
+        if instance.data.get("frames"):
+            # list of files
+            files = instance.data["frames"]
+        else:
+            # single file
+            files = os.path.basename(output)

         # We run the render
-        self.log.info("Writing alembic '%s' to '%s'" % (file_name,
+        self.log.info("Writing alembic '%s' to '%s'" % (files,
                                                         staging_dir))

         render_rop(ropnode)
@@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
         representation = {
             'name': 'abc',
             'ext': 'abc',
-            'files': file_name,
+            'files': files,
             "stagingDir": staging_dir,
         }
         instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
index c6dfb4332d..0fab69ef4a 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
 import hou


-class ExtractComposite(publish.Extractor):
+class ExtractComposite(publish.Extractor,
+                       publish.ColormanagedPyblishPluginMixin):

     order = pyblish.api.ExtractorOrder
     label = "Extract Composite (Image Sequence)"
@@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
             "frameEnd": instance.data["frameEndHandle"],
         }

-        from pprint import pformat
-
-        self.log.info(pformat(representation))
+        if ext.lower() == "exr":
+            # Inject colorspace with 'scene_linear' as that's the
+            # default Houdini working colorspace and all extracted
+            # OpenEXR images should be in that colorspace.
+            # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
+            self.set_representation_colorspace(
+                representation, instance.context,
+                colorspace="scene_linear"
+            )

         instance.data["representations"].append(representation)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
index fabdfd9a9d..26a216e335 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py
@@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
 import hou


-class ExtractOpenGL(publish.Extractor):
+class ExtractOpenGL(publish.Extractor,
+                    publish.ColormanagedPyblishPluginMixin):

     order = pyblish.api.ExtractorOrder - 0.01
     label = "Extract OpenGL"
@@ -18,6 +19,16 @@ class ExtractOpenGL(publish.Extractor):
     def process(self, instance):
         ropnode = hou.node(instance.data.get("instance_node"))
+        # This plugin is triggered when marking the render as reviewable.
+        # Therefore, it can also run on the wrong instances.
+        # TODO: Don't run this plugin on the wrong instances.
+        # This plugin should run only on the review product type
+        # with an instance node of the opengl type.
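# [Aside] One way to resolve the TODO above; sketch only, not part of the
# patch. Instead of guarding inside process(), the node-type check could
# live in a small helper shared by the review plugins, so they all skip
# non-OpenGL instances consistently. `is_opengl_rop` is hypothetical:
def is_opengl_rop(node):
    """Return True only for instances backed by an `opengl` ROP node."""
    return node is not None and node.type().name() == "opengl"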
+ if ropnode.type().name() != "opengl": + self.log.debug("Skipping OpenGl extraction. Rop node {} " + "is not an OpenGl node.".format(ropnode.path())) + return + output = ropnode.evalParm("picture") staging_dir = os.path.normpath(os.path.dirname(output)) instance.data["stagingDir"] = staging_dir @@ -46,6 +57,14 @@ class ExtractOpenGL(publish.Extractor): "camera_name": instance.data.get("review_camera") } + if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled + colorspace = ropnode.evalParm("ociocolorspace") + # inject colorspace data + self.set_representation_colorspace( + representation, instance.context, + colorspace=colorspace + ) + if "representations" not in instance.data: instance.data["representations"] = [] instance.data["representations"].append(representation) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py new file mode 100644 index 0000000000..7b4762a25f --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py @@ -0,0 +1,74 @@ +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop +import hou +import os + + +class ExtractRender(publish.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract Render" + hosts = ["houdini"] + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + def process(self, instance): + creator_attribute = instance.data["creator_attributes"] + product_type = instance.data["productType"] + rop_node = hou.node(instance.data.get("instance_node")) + + # Align split parameter value on rop node to the render target. + if instance.data["splitRender"]: + if product_type == "arnold_rop": + rop_node.setParms({"ar_ass_export_enable": 1}) + elif product_type == "mantra_rop": + rop_node.setParms({"soho_outputmode": 1}) + elif product_type == "redshift_rop": + rop_node.setParms({"RS_archive_enable": 1}) + elif product_type == "vray_rop": + rop_node.setParms({"render_export_mode": "2"}) + else: + if product_type == "arnold_rop": + rop_node.setParms({"ar_ass_export_enable": 0}) + elif product_type == "mantra_rop": + rop_node.setParms({"soho_outputmode": 0}) + elif product_type == "redshift_rop": + rop_node.setParms({"RS_archive_enable": 0}) + elif product_type == "vray_rop": + rop_node.setParms({"render_export_mode": "1"}) + + if instance.data.get("farm"): + self.log.debug("Render should be processed on farm, skipping local render.") + return + + if creator_attribute.get("render_target") == "local": + ropnode = hou.node(instance.data.get("instance_node")) + render_rop(ropnode) + + # `ExpectedFiles` is a list that includes one dict. + expected_files = instance.data["expectedFiles"][0] + # Each key in that dict is a list of files. + # Combine lists of files into one big list. + all_frames = [] + for value in expected_files.values(): + if isinstance(value, str): + all_frames.append(value) + elif isinstance(value, list): + all_frames.extend(value) + # Check missing frames. + # Frames won't exist if user cancels the render. + missing_frames = [ + frame + for frame in all_frames + if not os.path.exists(frame) + ] + if missing_frames: + # TODO: Use user friendly error reporting. + raise RuntimeError("Failed to complete render extraction. 
" + "Missing output files: {}".format( + missing_frames)) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py index fe8fa25f10..3e9291d5c2 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py @@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder + 9.0 hosts = ["houdini"] families = ["workfile", - "redshift_rop", - "arnold_rop", + "usdrender", "mantra_rop", "karma_rop", - "usdrender", + "redshift_rop", + "arnold_rop", + "vray_rop", + "render.local.hou", "publish.hou"] optional = True diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py index fdf03d5cba..91bd36018a 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -import sys +import hou import pyblish.api -import six from ayon_core.pipeline import PublishValidationError @@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise PublishValidationError( - ("Output node(s) `{}` are incorrect. " - "See plug-in log for details.").format(invalid), - title=self.label + "Output node '{}' is incorrect. " + "See plug-in log for details.".format(invalid), + title=self.label, + description=( + "### Invalid COP output node\n\n" + "The output node path for the instance must be set to a " + "valid COP node path.\n\nSee the log for more details." + ) ) @classmethod def get_invalid(cls, instance): + output_node = instance.data.get("output_node") - import hou - - try: - output_node = instance.data["output_node"] - except KeyError: - six.reraise( - PublishValidationError, - PublishValidationError( - "Can't determine COP output node.", - title=cls.__name__), - sys.exc_info()[2] - ) - - if output_node is None: + if not output_node: node = hou.node(instance.data.get("instance_node")) cls.log.error( "COP Output node in '%s' does not exist. " @@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): cls.log.error( "Output node %s is not a COP node. " "COP Path must point to a COP node, " - "instead found category type: %s" - % (output_node.path(), output_node.type().category().name()) + "instead found category type: %s", + output_node.path(), output_node.type().category().name() ) return [output_node.path()] @@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): # is Cop2 to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category if output_node.type().category().name() != "Cop2": - raise PublishValidationError( - ( - "Output node {} is not of category Cop2." - " This is a bug..." 
-                ).format(output_node.path()),
-                title=cls.label)
+            cls.log.error(
+                "Output node %s is not of category Cop2.", output_node.path()
+            )
+            return [output_node.path()]
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
index 031138e21d..fa532c5437 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py
@@ -4,15 +4,19 @@ from ayon_core.pipeline import (
     PublishValidationError,
     OptionalPyblishPluginMixin
 )
-from ayon_core.pipeline.publish import RepairAction
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    get_plugin_settings,
+    apply_plugin_settings_automatically
+)
 from ayon_core.hosts.houdini.api.action import SelectROPAction

 import os
 import hou


-class SetDefaultViewSpaceAction(RepairAction):
-    label = "Set default view colorspace"
+class ResetViewSpaceAction(RepairAction):
+    label = "Reset OCIO colorspace parm"
     icon = "mdi.monitor"


@@ -27,12 +31,43 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
     families = ["review"]
     hosts = ["houdini"]
     label = "Validate Review Colorspace"
-    actions = [SetDefaultViewSpaceAction, SelectROPAction]
+    actions = [ResetViewSpaceAction, SelectROPAction]

     optional = True
+    review_color_space = ""
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Preserve automatic settings applying logic
+        settings = get_plugin_settings(plugin=cls,
+                                       project_settings=project_settings,
+                                       log=cls.log,
+                                       category="houdini")
+        apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+        # workfile settings added in '0.2.13'
+        color_settings = project_settings["houdini"]["imageio"].get(
+            "workfile", {}
+        )
+        # Add review color settings
+        if color_settings.get("enabled"):
+            cls.review_color_space = color_settings.get("review_color_space")
+
     def process(self, instance):
+        rop_node = hou.node(instance.data["instance_node"])
+
+        # This plugin is triggered when marking the render as reviewable.
+        # Therefore, it can also run on the wrong instances.
+        # TODO: Don't run this plugin on the wrong instances.
+        # This plugin should run only on the review product type
+        # with an instance node of the opengl type.
+        if rop_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation. Rop node {} "
+                           "is not an OpenGL node.".format(rop_node.path()))
+            return
+
         if not self.is_active(instance.data):
             return

@@ -43,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
             )
             return

-        rop_node = hou.node(instance.data["instance_node"])
         if rop_node.evalParm("colorcorrect") != 2:
             # any colorspace settings other than default requires
             # 'Color Correct' parm to be set to 'OpenColorIO'
@@ -52,39 +86,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
                 " 'OpenColorIO'".format(rop_node.path())
             )

-        if rop_node.evalParm("ociocolorspace") not in \
-                hou.Color.ocio_spaces():
-
+        current_color_space = rop_node.evalParm("ociocolorspace")
+        if current_color_space not in hou.Color.ocio_spaces():
             raise PublishValidationError(
                 "Invalid value: Colorspace name doesn't exist.\n"
                 "Check 'OCIO Colorspace' parameter on '{}' ROP"
                 .format(rop_node.path())
             )

-    @classmethod
-    def repair(cls, instance):
-        """Set Default View Space Action.
+        # If houdini/imageio/workfile is enabled and the Review colorspace
+        # setting is empty, this check should ideally compare
+        # current_color_space against the default colorspace value.
+        # However, computing that default makes the black cmd screen show
+        # up more often, which is very annoying.
+        if self.review_color_space and \
+                self.review_color_space != current_color_space:
-        It is a helper action more than a repair action,
-        used to set colorspace on opengl node to the default view.
-        """
-        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
-
-        rop_node = hou.node(instance.data["instance_node"])
-
-        if rop_node.evalParm("colorcorrect") != 2:
-            rop_node.setParms({"colorcorrect": 2})
-            cls.log.debug(
-                "'Color Correction' parm on '{}' has been set to"
-                " 'OpenColorIO'".format(rop_node.path())
+            raise PublishValidationError(
+                "Invalid value: Colorspace name doesn't match "
+                "the colorspace specified in settings."
             )
-    # Get default view colorspace name
-    default_view_space = get_default_display_view_colorspace()

+    @classmethod
+    def repair(cls, instance):
+        """Reset view colorspace.
-    rop_node.setParms({"ociocolorspace": default_view_space})
-    cls.log.info(
-        "'OCIO Colorspace' parm on '{}' has been set to "
-        "the default view color space '{}'"
-        .format(rop_node, default_view_space)
-    )
+        It is used to set the colorspace on the opengl node.
+
+        It uses the colorspace value specified in the Houdini addon settings.
+        If the value in the Houdini addon settings is empty,
+        it will fall back to the default colorspace.
+
+        Note:
+            This repair action assumes that OCIO is enabled, because if
+            OCIO is disabled the whole validation is skipped and this
+            repair action won't show up.
+        """
+        from ayon_core.hosts.houdini.api.lib import set_review_color_space
+
+        # Fall back to the default value if cls.review_color_space is empty.
+        if not cls.review_color_space:
+            # cls.review_color_space is an empty string
+            # when the imageio/workfile setting is disabled or
+            # when the Review colorspace setting is empty.
+            from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
+            cls.review_color_space = get_default_display_view_colorspace()
+
+        rop_node = hou.node(instance.data["instance_node"])
+        set_review_color_space(rop_node,
+                               cls.review_color_space,
+                               cls.log)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
index b6007d3f0f..0b09306b0d 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py
@@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
         report = []
         instance_node = hou.node(instance.data.get("instance_node"))

+        # This plugin is triggered when marking the render as reviewable.
+        # Therefore, it can also run on the wrong instances.
+        # TODO: Don't run this plugin on the wrong instances.
+        # This plugin should run only on the review product type
+        # with an instance node of the opengl type.
+        if instance_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation.
Rop node {} " + "is not an OpenGl node.".format(instance_node.path())) + return + invalid = self.get_invalid_scene_path(instance_node) if invalid: report.append(invalid) diff --git a/client/ayon_core/hosts/houdini/startup/OPmenu.xml b/client/ayon_core/hosts/houdini/startup/OPmenu.xml new file mode 100644 index 0000000000..0a7b265fa1 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/OPmenu.xml @@ -0,0 +1,29 @@ + + + + + + + + opmenu.unsynchronize + + opmenu.vhda_create + + + + + + + + + + diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py index 48bb15f538..f20f754248 100644 --- a/client/ayon_core/hosts/max/api/lib.py +++ b/client/ayon_core/hosts/max/api/lib.py @@ -6,12 +6,14 @@ import json from typing import Any, Dict, Union import six -import ayon_api -from ayon_core.pipeline import get_current_project_name, colorspace +from ayon_core.pipeline import ( + get_current_project_name, + colorspace +) from ayon_core.settings import get_project_settings from ayon_core.pipeline.context_tools import ( - get_current_folder_entity, + get_current_task_entity ) from ayon_core.style import load_stylesheet from pymxs import runtime as rt @@ -221,41 +223,30 @@ def reset_scene_resolution(): scene resolution can be overwritten by a folder if the folder.attrib contains any information regarding scene resolution. """ - - folder_entity = get_current_folder_entity( - fields={"attrib.resolutionWidth", "attrib.resolutionHeight"} - ) - folder_attributes = folder_entity["attrib"] - width = int(folder_attributes["resolutionWidth"]) - height = int(folder_attributes["resolutionHeight"]) + task_attributes = get_current_task_entity(fields={"attrib"})["attrib"] + width = int(task_attributes["resolutionWidth"]) + height = int(task_attributes["resolutionHeight"]) set_scene_resolution(width, height) -def get_frame_range(folder_entiy=None) -> Union[Dict[str, Any], None]: - """Get the current folder frame range and handles. +def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]: + """Get the current task frame range and handles Args: - folder_entiy (dict): Folder eneity. + task_entity (dict): Task Entity. Returns: dict: with frame start, frame end, handle start, handle end. """ # Set frame start/end - if folder_entiy is None: - folder_entiy = get_current_folder_entity() - - folder_attributes = folder_entiy["attrib"] - frame_start = folder_attributes.get("frameStart") - frame_end = folder_attributes.get("frameEnd") - - if frame_start is None or frame_end is None: - return {} - - frame_start = int(frame_start) - frame_end = int(frame_end) - handle_start = int(folder_attributes.get("handleStart", 0)) - handle_end = int(folder_attributes.get("handleEnd", 0)) + if task_entity is None: + task_entity = get_current_task_entity(fields={"attrib"}) + task_attributes = task_entity["attrib"] + frame_start = int(task_attributes["frameStart"]) + frame_end = int(task_attributes["frameEnd"]) + handle_start = int(task_attributes["handleStart"]) + handle_end = int(task_attributes["handleEnd"]) frame_start_handle = frame_start - handle_start frame_end_handle = frame_end + handle_end @@ -281,9 +272,9 @@ def reset_frame_range(fps: bool = True): scene frame rate in frames-per-second. 
""" if fps: - project_name = get_current_project_name() - project_entity = ayon_api.get_project(project_name) - fps_number = float(project_entity["attrib"].get("fps")) + task_entity = get_current_task_entity() + task_attributes = task_entity["attrib"] + fps_number = float(task_attributes["fps"]) rt.frameRate = fps_number frame_range = get_frame_range() @@ -378,12 +369,8 @@ def reset_colorspace(): """ if int(get_max_version()) < 2024: return - project_name = get_current_project_name() - colorspace_mgr = rt.ColorPipelineMgr - project_settings = get_project_settings(project_name) - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) + max_config_data = colorspace.get_current_context_imageio_config_preset() if max_config_data: ocio_config_path = max_config_data["path"] colorspace_mgr = rt.ColorPipelineMgr @@ -398,10 +385,7 @@ def check_colorspace(): "because Max main window can't be found.") if int(get_max_version()) >= 2024: color_mgr = rt.ColorPipelineMgr - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) + max_config_data = colorspace.get_current_context_imageio_config_preset() if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"): if not is_headless(): from ayon_core.tools.utils import SimplePopup @@ -502,9 +486,9 @@ def object_transform_set(container_children): """ transform_set = {} for node in container_children: - name = f"{node.name}.transform" + name = f"{node}.transform" transform_set[name] = node.pos - name = f"{node.name}.scale" + name = f"{node}.scale" transform_set[name] = node.scale return transform_set @@ -525,6 +509,36 @@ def get_plugins() -> list: return plugin_info_list +def update_modifier_node_names(event, node): + """Update the name of the nodes after renaming + + Args: + event (pymxs.MXSWrapperBase): Event Name ( + Mandatory argument for rt.NodeEventCallback) + node (list): Event Number ( + Mandatory argument for rt.NodeEventCallback) + + """ + containers = [ + obj + for obj in rt.Objects + if ( + rt.ClassOf(obj) == rt.Container + and rt.getUserProp(obj, "id") == "pyblish.avalon.instance" + and rt.getUserProp(obj, "productType") not in { + "workfile", "tyflow" + } + ) + ] + if not containers: + return + for container in containers: + ayon_data = container.modifiers[0].openPypeData + updated_node_names = [str(node.node) for node + in ayon_data.all_handles] + rt.setProperty(ayon_data, "sel_list", updated_node_names) + + @contextlib.contextmanager def render_resolution(width, height): """Set render resolution option during context diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py index 675f36c24f..d9cfc3407f 100644 --- a/client/ayon_core/hosts/max/api/pipeline.py +++ b/client/ayon_core/hosts/max/api/pipeline.py @@ -52,17 +52,15 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): self._has_been_setup = True - def context_setting(): - return lib.set_context_setting() - - rt.callbacks.addScript(rt.Name('systemPostNew'), - context_setting) + rt.callbacks.addScript(rt.Name('systemPostNew'), on_new) rt.callbacks.addScript(rt.Name('filePostOpen'), lib.check_colorspace) rt.callbacks.addScript(rt.Name('postWorkspaceChange'), self._deferred_menu_creation) + rt.NodeEventCallback( + nameChanged=lib.update_modifier_node_names) def workfile_has_unsaved_changes(self): return rt.getSaveRequired() @@ -161,6 +159,14 @@ def ls() -> list: 
yield lib.read(container) +def on_new(): + lib.set_context_setting() + if rt.checkForSave(): + rt.resetMaxFile(rt.Name("noPrompt")) + rt.clearUndoBuffer() + rt.redrawViews() + + def containerise(name: str, nodes: list, context, namespace=None, loader=None, suffix="_CON"): data = { diff --git a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py index 4f982dd5ba..97b8c6cd52 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py +++ b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py @@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin): ) for max_obj, obj_name in zip(max_objects, max_object_names): max_obj.name = f"{namespace}:{obj_name}" - max_container.append(rt.getNodeByName(max_obj.name)) + max_container.append(max_obj) return containerise( name, max_container, context, namespace, loader=self.__class__.__name__) @@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin): current_max_object_names): max_obj.name = f"{namespace}:{obj_name}" max_objects.append(max_obj) - max_transform = f"{max_obj.name}.transform" + max_transform = f"{max_obj}.transform" if max_transform in transform_data.keys(): max_obj.pos = transform_data[max_transform] or 0 max_obj.scale = transform_data[ - f"{max_obj.name}.scale"] or 0 + f"{max_obj}.scale"] or 0 update_custom_attribute_data(node, max_objects) lib.imprint(container["instance_node"], { diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py index 82cad71c3e..6f5de20ae0 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py @@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin): for fbx_object in current_fbx_objects: fbx_object.name = f"{namespace}:{fbx_object.name}" fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object.name}.transform" + fbx_transform = f"{fbx_object}.transform" if fbx_transform in transform_data.keys(): fbx_object.pos = transform_data[fbx_transform] or 0 fbx_object.scale = transform_data[ - f"{fbx_object.name}.scale"] or 0 + f"{fbx_object}.scale"] or 0 with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py index 38f2cdf43c..a9119259df 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py @@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin): selections = rt.GetCurrentSelection() for selection in selections: selection.name = f"{namespace}:{selection.name}" - selection_transform = f"{selection.name}.transform" + selection_transform = f"{selection}.transform" if selection_transform in transform_data.keys(): selection.pos = transform_data[selection_transform] or 0 selection.scale = transform_data[ - f"{selection.name}.scale"] or 0 + f"{selection}.scale"] or 0 update_custom_attribute_data(node, selections) with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py index 2b946eb2aa..2ed5d64a18 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py @@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin): for children in asset.Children: children.name = f"{namespace}:{children.name}" 
usd_objects.append(children) - children_transform = f"{children.name}.transform" + children_transform = f"{children}.transform" if children_transform in transform_data.keys(): children.pos = transform_data[children_transform] or 0 children.scale = transform_data[ - f"{children.name}.scale"] or 0 + f"{children}.scale"] or 0 asset.name = f"{namespace}:{asset.name}" usd_objects.append(asset) diff --git a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py index 2efb7c7f62..47690f84e9 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py @@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin): abc.Parent = container abc.name = f"{namespace}:{abc.name}" ox_abc_objects.append(abc) - ox_transform = f"{abc.name}.transform" + ox_transform = f"{abc}.transform" if ox_transform in transform_data.keys(): abc.pos = transform_data[ox_transform] or 0 - abc.scale = transform_data[f"{abc.name}.scale"] or 0 + abc.scale = transform_data[f"{abc}.scale"] or 0 update_custom_attribute_data(node, ox_abc_objects) lib.imprint( container["instance_node"], diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py index 67b5174200..67cec23ecc 100644 --- a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py @@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor, hosts = ["max"] families = ["pointcache"] optional = True + active = True def process(self, instance): if not self.is_active(instance.data): @@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor, @classmethod def get_attribute_defs(cls): - return [ + defs = super(ExtractAlembic, cls).get_attribute_defs() + defs.extend([ BoolDef("custom_attrs", label="Custom Attributes", default=False), - ] + ]) + return defs class ExtractCameraAlembic(ExtractAlembic): """Extract Camera with AlembicExport.""" - label = "Extract Alembic Camera" families = ["camera"] + optional = True -class ExtractModel(ExtractAlembic): +class ExtractModelAlembic(ExtractAlembic): """Extract Geometry in Alembic Format""" label = "Extract Geometry (Alembic)" families = ["model"] + optional = True def _set_abc_attributes(self, instance): attr_values = self.get_attr_values_from_data(instance.data) diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py index 2f4ec5f86c..11b55232d5 100644 --- a/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_frame_range.py @@ -42,7 +42,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin, return frame_range = get_frame_range( - instance.data["folderEntity"]) + instance.data["taskEntity"]) inst_frame_start = instance.data.get("frameStartHandle") inst_frame_end = instance.data.get("frameEndHandle") diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py b/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py index cecfd5fd12..5107665235 100644 --- a/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_instance_in_context.py @@ -38,7 +38,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin, context_label = "{} 
> {}".format(*context) instance_label = "{} > {}".format(folderPath, task) message = ( - "Instance '{}' publishes to different folder or task " + "Instance '{}' publishes to different context(folder or task) " "than current context: {}. Current context: {}".format( instance.name, instance_label, context_label ) @@ -46,7 +46,7 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin, raise PublishValidationError( message=message, description=( - "## Publishing to a different context folder or task\n" + "## Publishing to a different context data(folder or task)\n" "There are publish instances present which are publishing " "into a different folder path or task than your current context.\n\n" "Usually this is not what you want but there can be cases " diff --git a/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py index f499f851f1..5f6cd0a21d 100644 --- a/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py +++ b/client/ayon_core/hosts/max/plugins/publish/validate_resolution_setting.py @@ -7,7 +7,10 @@ from ayon_core.pipeline.publish import ( RepairAction, PublishValidationError ) -from ayon_core.hosts.max.api.lib import reset_scene_resolution +from ayon_core.hosts.max.api.lib import ( + reset_scene_resolution, + imprint +) class ValidateResolutionSetting(pyblish.api.InstancePlugin, @@ -25,8 +28,10 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin, if not self.is_active(instance.data): return width, height = self.get_folder_resolution(instance) - current_width = rt.renderWidth - current_height = rt.renderHeight + current_width, current_height = ( + self.get_current_resolution(instance) + ) + if current_width != width and current_height != height: raise PublishValidationError("Resolution Setting " "not matching resolution " @@ -41,12 +46,16 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin, "not matching resolution set " "on asset or shot.") - def get_folder_resolution(self, instance): - folder_entity = instance.data["folderEntity"] - if folder_entity: - folder_attributes = folder_entity["attrib"] - width = folder_attributes["resolutionWidth"] - height = folder_attributes["resolutionHeight"] + def get_current_resolution(self, instance): + return rt.renderWidth, rt.renderHeight + + @classmethod + def get_folder_resolution(cls, instance): + task_entity = instance.data.get("taskEntity") + if task_entity: + task_attributes = task_entity["attrib"] + width = task_attributes["resolutionWidth"] + height = task_attributes["resolutionHeight"] return int(width), int(height) # Defaults if not found in folder entity @@ -55,3 +64,29 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin, @classmethod def repair(cls, instance): reset_scene_resolution() + + +class ValidateReviewResolutionSetting(ValidateResolutionSetting): + families = ["review"] + optional = True + actions = [RepairAction] + + def get_current_resolution(self, instance): + current_width = instance.data["review_width"] + current_height = instance.data["review_height"] + return current_width, current_height + + @classmethod + def repair(cls, instance): + context_width, context_height = ( + cls.get_folder_resolution(instance) + ) + creator_attrs = instance.data["creator_attributes"] + creator_attrs["review_width"] = context_width + creator_attrs["review_height"] = context_height + creator_attrs_data = { + "creator_attributes": creator_attrs + } + # update the width and height of review + # 
data in creator_attributes + imprint(instance.data["instance_node"], creator_attrs_data) diff --git a/client/ayon_core/hosts/max/startup/startup.ms b/client/ayon_core/hosts/max/startup/startup.ms index 2dfe53a6a5..c5b4f0e526 100644 --- a/client/ayon_core/hosts/max/startup/startup.ms +++ b/client/ayon_core/hosts/max/startup/startup.ms @@ -12,4 +12,4 @@ max create mode python.ExecuteFile startup -) \ No newline at end of file +) diff --git a/client/ayon_core/hosts/maya/api/alembic.py b/client/ayon_core/hosts/maya/api/alembic.py new file mode 100644 index 0000000000..6bd00e1cb1 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/alembic.py @@ -0,0 +1,350 @@ +import json +import logging +import os + +from maya import cmds # noqa + +from ayon_core.hosts.maya.api.lib import evaluation + +log = logging.getLogger(__name__) + +# The maya alembic export types +ALEMBIC_ARGS = { + "attr": (list, tuple), + "attrPrefix": (list, tuple), + "autoSubd": bool, + "dataFormat": str, + "endFrame": float, + "eulerFilter": bool, + "frameRange": str, # "start end"; overrides startFrame & endFrame + "frameRelativeSample": float, + "melPerFrameCallback": str, + "melPostJobCallback": str, + "noNormals": bool, + "preRoll": bool, + "pythonPerFrameCallback": str, + "pythonPostJobCallback": str, + "renderableOnly": bool, + "root": (list, tuple), + "selection": bool, + "startFrame": float, + "step": float, + "stripNamespaces": bool, + "userAttr": (list, tuple), + "userAttrPrefix": (list, tuple), + "uvWrite": bool, + "uvsOnly": bool, + "verbose": bool, + "wholeFrameGeo": bool, + "worldSpace": bool, + "writeColorSets": bool, + "writeCreases": bool, # Maya 2015 Ext1+ + "writeFaceSets": bool, + "writeUVSets": bool, # Maya 2017+ + "writeVisibility": bool, +} + + +def extract_alembic( + file, + attr=None, + attrPrefix=None, + dataFormat="ogawa", + endFrame=None, + eulerFilter=True, + frameRange="", + melPerFrameCallback=None, + melPostJobCallback=None, + noNormals=False, + preRoll=False, + preRollStartFrame=0, + pythonPerFrameCallback=None, + pythonPostJobCallback=None, + renderableOnly=False, + root=None, + selection=True, + startFrame=None, + step=1.0, + stripNamespaces=True, + userAttr=None, + userAttrPrefix=None, + uvsOnly=False, + uvWrite=True, + verbose=False, + wholeFrameGeo=False, + worldSpace=False, + writeColorSets=False, + writeCreases=False, + writeFaceSets=False, + writeUVSets=False, + writeVisibility=False +): + """Extract a single Alembic Cache. + + This extracts an Alembic cache using the `-selection` flag to minimize + the extracted content to solely what was Collected into the instance. + + Arguments: + file (str): The filepath to write the alembic file to. + + attr (list of str, optional): A specific geometric attribute to write + out. Defaults to []. + + attrPrefix (list of str, optional): Prefix filter for determining which + geometric attributes to write out. Defaults to ["ABC_"]. + + dataFormat (str): The data format to use for the cache, + defaults to "ogawa" + + endFrame (float): End frame of output. Ignored if `frameRange` + provided. + + eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with + an Euler filter. Euler filtering helps resolve irregularities in + rotations especially if X, Y, and Z rotations exceed 360 degrees. + Defaults to True. + + frameRange (tuple or str): Two-tuple with start and end frame or a + string formatted as: "startFrame endFrame". This argument + overrides `startFrame` and `endFrame` arguments. 
+
+        melPerFrameCallback (Optional[str]): MEL callback run per frame.
+
+        melPostJobCallback (Optional[str]): MEL callback after last frame
+            is written.
+
+        noNormals (bool): When on, normal data from the original polygon
+            objects is not included in the exported Alembic cache file.
+
+        preRoll (bool): When on, the frame range is used as run-up only
+            and is not written to the file. Defaults to False.
+
+        preRollStartFrame (float): The frame to start scene
+            evaluation at. This is used to set the starting frame for time
+            dependent translations and can be used to evaluate run-up that
+            isn't actually translated. Defaults to 0.
+
+        pythonPerFrameCallback (Optional[str]): Python callback run per
+            frame.
+
+        pythonPostJobCallback (Optional[str]): Python callback after last
+            frame is written.
+
+        renderableOnly (bool): When on, any non-renderable nodes or
+            hierarchy, such as hidden objects, are not included in the
+            Alembic file. Defaults to False.
+
+        root (list of str): Maya DAG paths which will be parented to
+            the root of the Alembic file. Defaults to [], which means the
+            entire scene will be written out.
+
+        selection (bool): Write out all selected nodes from the
+            active selection list that are descendants of the roots
+            specified with -root. Defaults to True.
+
+        startFrame (float): Start frame of output. Ignored if `frameRange`
+            provided.
+
+        step (float): The time interval (expressed in frames) at
+            which the frame range is sampled. Additional samples around each
+            frame can be specified with -frs. Defaults to 1.0.
+
+        stripNamespaces (bool): When on, any namespaces associated with the
+            exported objects are removed from the Alembic file. For example,
+            an object with the namespace taco:foo:bar appears as bar in the
+            Alembic file.
+
+        userAttr (list of str, optional): A specific user defined attribute
+            to write out. Defaults to [].
+
+        userAttrPrefix (list of str, optional): Prefix filter for determining
+            which user defined attributes to write out. Defaults to [].
+
+        uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
+            will be written to the Alembic file.
+
+        uvWrite (bool): When on, UV data from polygon meshes and subdivision
+            objects are written to the Alembic file. Only the current UV map
+            is included.
+
+        verbose (bool): When on, outputs frame number information to the
+            Script Editor or output window during extraction.
+
+        wholeFrameGeo (bool): Data for geometry will only be written
+            out on whole frames. Defaults to False.
+
+        worldSpace (bool): When on, the top node in the node hierarchy is
+            stored as world space. By default, these nodes are stored as
+            local space. Defaults to False.
+
+        writeColorSets (bool): Write all color sets on MFnMeshes as
+            color 3 or color 4 indexed geometry parameters with face varying
+            scope. Defaults to False.
+
+        writeCreases (bool): If the mesh has crease edges or crease
+            vertices, the mesh (OPolyMesh) will be written out as an OSubD
+            and crease info will be stored in the Alembic file. Otherwise,
+            crease info won't be preserved in the Alembic file unless a
+            custom Boolean attribute SubDivisionMesh has been added to the
+            mesh node and its value is true. Defaults to False.
+
+        writeFaceSets (bool): Write all Face sets on MFnMeshes.
+            Defaults to False.
+
+        writeUVSets (bool): Write all uv sets on MFnMeshes as vector
+            2 indexed geometry parameters with face varying scope. Defaults
+            to False.
+
+        writeVisibility (bool): Visibility state will be stored in
+            the Alembic file. Otherwise everything written out is treated as
+            visible. Defaults to False.
+ """ + + # Ensure alembic exporter is loaded + cmds.loadPlugin('AbcExport', quiet=True) + + # Alembic Exporter requires forward slashes + file = file.replace('\\', '/') + + # Ensure list arguments are valid. + attr = attr or [] + attrPrefix = attrPrefix or [] + userAttr = userAttr or [] + userAttrPrefix = userAttrPrefix or [] + root = root or [] + + # Pass the start and end frame on as `frameRange` so that it + # never conflicts with that argument + if not frameRange: + # Fallback to maya timeline if no start or end frame provided. + if startFrame is None: + startFrame = cmds.playbackOptions(query=True, + animationStartTime=True) + if endFrame is None: + endFrame = cmds.playbackOptions(query=True, + animationEndTime=True) + + # Ensure valid types are converted to frame range + assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"]) + assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"]) + frameRange = "{0} {1}".format(startFrame, endFrame) + else: + # Allow conversion from tuple for `frameRange` + if isinstance(frameRange, (list, tuple)): + assert len(frameRange) == 2 + frameRange = "{0} {1}".format(frameRange[0], frameRange[1]) + + # Assemble options + options = { + "selection": selection, + "frameRange": frameRange, + "eulerFilter": eulerFilter, + "noNormals": noNormals, + "preRoll": preRoll, + "root": root, + "renderableOnly": renderableOnly, + "uvWrite": uvWrite, + "uvsOnly": uvsOnly, + "writeColorSets": writeColorSets, + "writeFaceSets": writeFaceSets, + "wholeFrameGeo": wholeFrameGeo, + "worldSpace": worldSpace, + "writeVisibility": writeVisibility, + "writeUVSets": writeUVSets, + "writeCreases": writeCreases, + "dataFormat": dataFormat, + "step": step, + "attr": attr, + "attrPrefix": attrPrefix, + "userAttr": userAttr, + "userAttrPrefix": userAttrPrefix, + "stripNamespaces": stripNamespaces, + "verbose": verbose + } + + # Validate options + for key, value in options.copy().items(): + + # Discard unknown options + if key not in ALEMBIC_ARGS: + log.warning("extract_alembic() does not support option '%s'. 
" + "Flag will be ignored..", key) + options.pop(key) + continue + + # Validate value type + valid_types = ALEMBIC_ARGS[key] + if not isinstance(value, valid_types): + raise TypeError("Alembic option unsupported type: " + "{0} (expected {1})".format(value, valid_types)) + + # Ignore empty values, like an empty string, since they mess up how + # job arguments are built + if isinstance(value, (list, tuple)): + value = [x for x in value if x.strip()] + + # Ignore option completely if no values remaining + if not value: + options.pop(key) + continue + + options[key] = value + + # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ + maya_version = int(cmds.about(version=True)) + if maya_version >= 2018: + options['autoSubd'] = options.pop('writeCreases', False) + + # Only add callbacks if they are set so that we're not passing `None` + callbacks = { + "melPerFrameCallback": melPerFrameCallback, + "melPostJobCallback": melPostJobCallback, + "pythonPerFrameCallback": pythonPerFrameCallback, + "pythonPostJobCallback": pythonPostJobCallback, + } + for key, callback in callbacks.items(): + if callback: + options[key] = str(callback) + + # Format the job string from options + job_args = list() + for key, value in options.items(): + if isinstance(value, (list, tuple)): + for entry in value: + job_args.append("-{} {}".format(key, entry)) + elif isinstance(value, bool): + # Add only when state is set to True + if value: + job_args.append("-{0}".format(key)) + else: + job_args.append("-{0} {1}".format(key, value)) + + job_str = " ".join(job_args) + job_str += ' -file "%s"' % file + + # Ensure output directory exists + parent_dir = os.path.dirname(file) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + if verbose: + log.debug("Preparing Alembic export with options: %s", + json.dumps(options, indent=4)) + log.debug("Extracting Alembic with job arguments: %s", job_str) + + # Perform extraction + print("Alembic Job Arguments : {}".format(job_str)) + + # Disable the parallel evaluation temporarily to ensure no buggy + # exports are made. 
(PLN-31) + # TODO: Make sure this actually fixes the issues + with evaluation("off"): + cmds.AbcExport( + j=job_str, + verbose=verbose, + preRollStartFrame=preRollStartFrame + ) + + if verbose: + log.debug("Extracted Alembic to: %s", file) + + return file diff --git a/client/ayon_core/hosts/maya/api/fbx.py b/client/ayon_core/hosts/maya/api/fbx.py index 939da4011b..fd1bf2c901 100644 --- a/client/ayon_core/hosts/maya/api/fbx.py +++ b/client/ayon_core/hosts/maya/api/fbx.py @@ -47,7 +47,7 @@ class FBXExtractor: "smoothMesh": bool, "instances": bool, # "referencedContainersContent": bool, # deprecated in Maya 2016+ - "bakeComplexAnimation": int, + "bakeComplexAnimation": bool, "bakeComplexStart": int, "bakeComplexEnd": int, "bakeComplexStep": int, @@ -59,6 +59,7 @@ class FBXExtractor: "constraints": bool, "lights": bool, "embeddedTextures": bool, + "includeChildren": bool, "inputConnections": bool, "upAxis": str, # x, y or z, "triangulate": bool, @@ -102,6 +103,7 @@ class FBXExtractor: "constraints": False, "lights": True, "embeddedTextures": False, + "includeChildren": True, "inputConnections": True, "upAxis": "y", "triangulate": False, diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py index 321bcbc0b5..2b41ffc06c 100644 --- a/client/ayon_core/hosts/maya/api/lib.py +++ b/client/ayon_core/hosts/maya/api/lib.py @@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] -# The maya alembic export types -_alembic_options = { - "startFrame": float, - "endFrame": float, - "frameRange": str, # "start end"; overrides startFrame & endFrame - "eulerFilter": bool, - "frameRelativeSample": float, - "noNormals": bool, - "renderableOnly": bool, - "step": float, - "stripNamespaces": bool, - "uvWrite": bool, - "wholeFrameGeo": bool, - "worldSpace": bool, - "writeVisibility": bool, - "writeColorSets": bool, - "writeFaceSets": bool, - "writeCreases": bool, # Maya 2015 Ext1+ - "writeUVSets": bool, # Maya 2017+ - "dataFormat": str, - "root": (list, tuple), - "attr": (list, tuple), - "attrPrefix": (list, tuple), - "userAttr": (list, tuple), - "melPerFrameCallback": str, - "melPostJobCallback": str, - "pythonPerFrameCallback": str, - "pythonPostJobCallback": str, - "selection": bool -} - INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000} FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94} @@ -1330,7 +1299,7 @@ def is_visible(node, override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node)) override_visibility = cmds.getAttr('{}.overrideVisibility'.format( node)) - if override_enabled and override_visibility: + if override_enabled and not override_visibility: return False if parentHidden: @@ -1346,178 +1315,6 @@ def is_visible(node, return True - -def extract_alembic(file, - startFrame=None, - endFrame=None, - selection=True, - uvWrite=True, - eulerFilter=True, - dataFormat="ogawa", - verbose=False, - **kwargs): - """Extract a single Alembic Cache. - - This extracts an Alembic cache using the `-selection` flag to minimize - the extracted content to solely what was Collected into the instance. - - Arguments: - - startFrame (float): Start frame of output. Ignored if `frameRange` - provided. - - endFrame (float): End frame of output. Ignored if `frameRange` - provided. - - frameRange (tuple or str): Two-tuple with start and end frame or a - string formatted as: "startFrame endFrame". This argument - overrides `startFrame` and `endFrame` arguments. 
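For reference, the options-to-job-string loop in the new `extract_alembic` above behaves like this standalone sketch: booleans emit a bare flag only when True, and list values repeat the flag once per entry (an illustration only, not code from this changeset):

def build_job_args(options, file):
    job_args = []
    for key, value in options.items():
        if isinstance(value, (list, tuple)):
            for entry in value:
                job_args.append("-{} {}".format(key, entry))
        elif isinstance(value, bool):
            # Booleans are flags: present only when True
            if value:
                job_args.append("-{}".format(key))
        else:
            job_args.append("-{} {}".format(key, value))
    return " ".join(job_args) + ' -file "{}"'.format(file)

print(build_job_args(
    {"uvWrite": True, "step": 1.0, "root": ["|char", "|prop"]},
    "/tmp/out.abc",
))
# -uvWrite -step 1.0 -root |char -root |prop -file "/tmp/out.abc"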
- - dataFormat (str): The data format to use for the cache, - defaults to "ogawa" - - verbose (bool): When on, outputs frame number information to the - Script Editor or output window during extraction. - - noNormals (bool): When on, normal data from the original polygon - objects is not included in the exported Alembic cache file. - - renderableOnly (bool): When on, any non-renderable nodes or hierarchy, - such as hidden objects, are not included in the Alembic file. - Defaults to False. - - stripNamespaces (bool): When on, any namespaces associated with the - exported objects are removed from the Alembic file. For example, an - object with the namespace taco:foo:bar appears as bar in the - Alembic file. - - uvWrite (bool): When on, UV data from polygon meshes and subdivision - objects are written to the Alembic file. Only the current UV map is - included. - - worldSpace (bool): When on, the top node in the node hierarchy is - stored as world space. By default, these nodes are stored as local - space. Defaults to False. - - eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with - an Euler filter. Euler filtering helps resolve irregularities in - rotations especially if X, Y, and Z rotations exceed 360 degrees. - Defaults to True. - - """ - - # Ensure alembic exporter is loaded - cmds.loadPlugin('AbcExport', quiet=True) - - # Alembic Exporter requires forward slashes - file = file.replace('\\', '/') - - # Pass the start and end frame on as `frameRange` so that it - # never conflicts with that argument - if "frameRange" not in kwargs: - # Fallback to maya timeline if no start or end frame provided. - if startFrame is None: - startFrame = cmds.playbackOptions(query=True, - animationStartTime=True) - if endFrame is None: - endFrame = cmds.playbackOptions(query=True, - animationEndTime=True) - - # Ensure valid types are converted to frame range - assert isinstance(startFrame, _alembic_options["startFrame"]) - assert isinstance(endFrame, _alembic_options["endFrame"]) - kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame) - else: - # Allow conversion from tuple for `frameRange` - frame_range = kwargs["frameRange"] - if isinstance(frame_range, (list, tuple)): - assert len(frame_range) == 2 - kwargs["frameRange"] = "{0} {1}".format(frame_range[0], - frame_range[1]) - - # Assemble options - options = { - "selection": selection, - "uvWrite": uvWrite, - "eulerFilter": eulerFilter, - "dataFormat": dataFormat - } - options.update(kwargs) - - # Validate options - for key, value in options.copy().items(): - - # Discard unknown options - if key not in _alembic_options: - log.warning("extract_alembic() does not support option '%s'. 
" - "Flag will be ignored..", key) - options.pop(key) - continue - - # Validate value type - valid_types = _alembic_options[key] - if not isinstance(value, valid_types): - raise TypeError("Alembic option unsupported type: " - "{0} (expected {1})".format(value, valid_types)) - - # Ignore empty values, like an empty string, since they mess up how - # job arguments are built - if isinstance(value, (list, tuple)): - value = [x for x in value if x.strip()] - - # Ignore option completely if no values remaining - if not value: - options.pop(key) - continue - - options[key] = value - - # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ - maya_version = int(cmds.about(version=True)) - if maya_version >= 2018: - options['autoSubd'] = options.pop('writeCreases', False) - - # Format the job string from options - job_args = list() - for key, value in options.items(): - if isinstance(value, (list, tuple)): - for entry in value: - job_args.append("-{} {}".format(key, entry)) - elif isinstance(value, bool): - # Add only when state is set to True - if value: - job_args.append("-{0}".format(key)) - else: - job_args.append("-{0} {1}".format(key, value)) - - job_str = " ".join(job_args) - job_str += ' -file "%s"' % file - - # Ensure output directory exists - parent_dir = os.path.dirname(file) - if not os.path.exists(parent_dir): - os.makedirs(parent_dir) - - if verbose: - log.debug("Preparing Alembic export with options: %s", - json.dumps(options, indent=4)) - log.debug("Extracting Alembic with job arguments: %s", job_str) - - # Perform extraction - print("Alembic Job Arguments : {}".format(job_str)) - - # Disable the parallel evaluation temporarily to ensure no buggy - # exports are made. (PLN-31) - # TODO: Make sure this actually fixes the issues - with evaluation("off"): - cmds.AbcExport(j=job_str, verbose=verbose) - - if verbose: - log.debug("Extracted Alembic to: %s", file) - - return file - - # region ID def get_id_required_nodes(referenced_nodes=False, nodes=None, @@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True): """ fps_mapping = { + '2': '2fps', + '3': '3fps', + '4': '4fps', + '5': '5fps', + '6': '6fps', + '8': '8fps', + '10': '10fps', + '12': '12fps', '15': 'game', + '16': '16fps', '24': 'film', '25': 'pal', '30': 'ntsc', @@ -2612,21 +2418,24 @@ def get_fps_for_current_context(): Returns: Union[int, float]: FPS value. 
""" - - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"attrib.fps"} - ) or {} - fps = folder_entity.get("attrib", {}).get("fps") + task_entity = get_current_task_entity(fields={"attrib"}) + fps = task_entity.get("attrib", {}).get("fps") if not fps: - project_entity = ayon_api.get_project( - project_name, fields=["attrib.fps"] + project_name = get_current_project_name() + folder_path = get_current_folder_path() + folder_entity = ayon_api.get_folder_by_path( + project_name, folder_path, fields={"attrib.fps"} ) or {} - fps = project_entity.get("attrib", {}).get("fps") + fps = folder_entity.get("attrib", {}).get("fps") if not fps: - fps = 25 + project_entity = ayon_api.get_project( + project_name, fields=["attrib.fps"] + ) or {} + fps = project_entity.get("attrib", {}).get("fps") + + if not fps: + fps = 25 return convert_to_maya_fps(fps) @@ -4403,3 +4212,23 @@ def create_rig_animation_instance( variant=namespace, pre_create_data={"use_selection": True} ) + + +def get_node_index_under_parent(node: str) -> int: + """Return the index of a DAG node under its parent. + + Arguments: + node (str): A DAG Node path. + + Returns: + int: The DAG node's index under its parents or world + + """ + node = cmds.ls(node, long=True)[0] # enforce long names + parent = node.rsplit("|", 1)[0] + if not parent: + return cmds.ls(assemblies=True, long=True).index(node) + else: + return cmds.listRelatives(parent, + children=True, + fullPath=True).index(node) diff --git a/client/ayon_core/hosts/maya/api/lib_renderproducts.py b/client/ayon_core/hosts/maya/api/lib_renderproducts.py index 832d1c21c2..52c282c6de 100644 --- a/client/ayon_core/hosts/maya/api/lib_renderproducts.py +++ b/client/ayon_core/hosts/maya/api/lib_renderproducts.py @@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts): # AOVs > Legacy > Maya Render View > Mode aovs_enabled = bool( - self._get_attr("defaultArnoldRenderOptions.aovMode") + self._get_attr( + "defaultArnoldRenderOptions.aovMode", as_string=False) ) if not aovs_enabled: return beauty_products diff --git a/client/ayon_core/hosts/maya/api/pipeline.py b/client/ayon_core/hosts/maya/api/pipeline.py index 864a0c1599..74d73e5f95 100644 --- a/client/ayon_core/hosts/maya/api/pipeline.py +++ b/client/ayon_core/hosts/maya/api/pipeline.py @@ -30,9 +30,11 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_inventory_action_path, register_creator_plugin_path, + register_workfile_build_plugin_path, deregister_loader_plugin_path, deregister_inventory_action_path, deregister_creator_plugin_path, + deregister_workfile_build_plugin_path, AYON_CONTAINER_ID, AVALON_CONTAINER_ID, ) @@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR from ayon_core.hosts.maya.lib import create_workspace_mel from . 
import menu, lib -from .workfile_template_builder import MayaPlaceholderLoadPlugin from .workio import ( open_file, save_file, @@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) - self.log.info(PUBLISH_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) self.log.info("Installing callbacks ... ") register_event_callback("init", on_init) @@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): def get_containers(self): return ls() - def get_workfile_build_placeholder_plugins(self): - return [ - MayaPlaceholderLoadPlugin - ] - @contextlib.contextmanager def maintained_selection(self): with lib.maintained_selection(): @@ -338,6 +335,7 @@ def uninstall(): deregister_loader_plugin_path(LOAD_PATH) deregister_creator_plugin_path(CREATE_PATH) deregister_inventory_action_path(INVENTORY_PATH) + deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH) menu.uninstall() diff --git a/client/ayon_core/hosts/maya/api/render_setup_tools.py b/client/ayon_core/hosts/maya/api/render_setup_tools.py index a5e04de184..9b00b53eee 100644 --- a/client/ayon_core/hosts/maya/api/render_setup_tools.py +++ b/client/ayon_core/hosts/maya/api/render_setup_tools.py @@ -19,7 +19,7 @@ from .lib import pairwise @contextlib.contextmanager -def _allow_export_from_render_setup_layer(): +def allow_export_from_render_setup_layer(): """Context manager to override Maya settings to allow RS layer export""" try: @@ -102,7 +102,7 @@ def export_in_rs_layer(path, nodes, export=None): cmds.disconnectAttr(src, dest) # Export Selected - with _allow_export_from_render_setup_layer(): + with allow_export_from_render_setup_layer(): cmds.select(nodes, noExpand=True) if export: export() diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py index 75386d7e64..f4f9a34983 100644 --- a/client/ayon_core/hosts/maya/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py @@ -12,14 +12,13 @@ from ayon_core.pipeline.workfile.workfile_template_builder import ( TemplateAlreadyImported, AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - PlaceholderLoadMixin, + PlaceholderItem, ) from ayon_core.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) -from .lib import read, imprint, get_reference_node, get_main_window +from .lib import read, imprint, get_main_window PLACEHOLDER_SET = "PLACEHOLDERS_SET" @@ -91,170 +90,102 @@ class MayaTemplateBuilder(AbstractTemplateBuilder): return True -class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): - identifier = "maya.load" - label = "Maya load" +class MayaPlaceholderPlugin(PlaceholderPlugin): + """Base Placeholder Plugin for Maya with one unified cache. 
- def _collect_scene_placeholders(self): - # Cache placeholder data to shared data - placeholder_nodes = self.builder.get_shared_populate_data( - "placeholder_nodes" - ) - if placeholder_nodes is None: - attributes = cmds.ls("*.plugin_identifier", long=True) - placeholder_nodes = {} - for attribute in attributes: - node_name = attribute.rpartition(".")[0] - placeholder_nodes[node_name] = ( - self._parse_placeholder_node_data(node_name) - ) + Creates a locator as placeholder node, which during populate provide + all of its attributes defined on the locator's transform in + `placeholder.data` and where `placeholder.scene_identifier` is the + full path to the node. - self.builder.set_shared_populate_data( - "placeholder_nodes", placeholder_nodes - ) - return placeholder_nodes + Inherited classes must still implement `populate_placeholder` - def _parse_placeholder_node_data(self, node_name): - placeholder_data = read(node_name) - parent_name = ( - cmds.getAttr(node_name + ".parent", asString=True) - or node_name.rpartition("|")[0] - or "" - ) - if parent_name: - siblings = cmds.listRelatives(parent_name, children=True) - else: - siblings = cmds.ls(assemblies=True) - node_shortname = node_name.rpartition("|")[2] - current_index = cmds.getAttr(node_name + ".index", asString=True) - if current_index < 0: - current_index = siblings.index(node_shortname) + """ - placeholder_data.update({ - "parent": parent_name, - "index": current_index - }) - return placeholder_data + use_selection_as_parent = True + item_class = PlaceholderItem def _create_placeholder_name(self, placeholder_data): - placeholder_name_parts = placeholder_data["builder_type"].split("_") + return self.identifier.replace(".", "_") - pos = 1 - placeholder_product_type = placeholder_data.get("product_type") - if placeholder_product_type is None: - placeholder_product_type = placeholder_data.get("family") - - if placeholder_product_type: - placeholder_name_parts.insert(pos, placeholder_product_type) - pos += 1 - - # add loader arguments if any - loader_args = placeholder_data["loader_args"] - if loader_args: - loader_args = json.loads(loader_args.replace('\'', '\"')) - values = [v for v in loader_args.values()] - for value in values: - placeholder_name_parts.insert(pos, value) - pos += 1 - - placeholder_name = "_".join(placeholder_name_parts) - - return placeholder_name.capitalize() - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" + def _collect_scene_placeholders(self): + nodes_by_identifier = self.builder.get_shared_populate_data( + "placeholder_nodes" ) - if loaded_representation_ids is None: - try: - containers = cmds.sets("AVALON_CONTAINERS", q=True) - except ValueError: - containers = [] + if nodes_by_identifier is None: + # Cache placeholder data to shared data + nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True) - loaded_representation_ids = { - cmds.getAttr(container + ".representation") - for container in containers - } + nodes_by_identifier = {} + for node in nodes: + identifier = cmds.getAttr("{}.plugin_identifier".format(node)) + nodes_by_identifier.setdefault(identifier, []).append(node) + + # Set the cache self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids + "placeholder_nodes", nodes_by_identifier ) - return loaded_representation_ids + + return nodes_by_identifier def create_placeholder(self, placeholder_data): - selection = cmds.ls(selection=True) - if len(selection) > 1: - 
raise ValueError("More then one item are selected") - parent = selection[0] if selection else None + parent = None + if self.use_selection_as_parent: + selection = cmds.ls(selection=True) + if len(selection) > 1: + raise ValueError( + "More than one node is selected. " + "Please select only one to define the parent." + ) + parent = selection[0] if selection else None placeholder_data["plugin_identifier"] = self.identifier - placeholder_name = self._create_placeholder_name(placeholder_data) placeholder = cmds.spaceLocator(name=placeholder_name)[0] if parent: placeholder = cmds.parent(placeholder, selection[0])[0] - imprint(placeholder, placeholder_data) - - # Add helper attributes to keep placeholder info - cmds.addAttr( - placeholder, - longName="parent", - hidden=True, - dataType="string" - ) - cmds.addAttr( - placeholder, - longName="index", - hidden=True, - attributeType="short", - defaultValue=-1 - ) - - cmds.setAttr(placeholder + ".parent", "", type="string") + self.imprint(placeholder, placeholder_data) def update_placeholder(self, placeholder_item, placeholder_data): node_name = placeholder_item.scene_identifier - new_values = {} + + changed_values = {} for key, value in placeholder_data.items(): - placeholder_value = placeholder_item.data.get(key) - if value != placeholder_value: - new_values[key] = value - placeholder_item.data[key] = value + if value != placeholder_item.data.get(key): + changed_values[key] = value - for key in new_values.keys(): - cmds.deleteAttr(node_name + "." + key) + # Delete attributes to ensure we imprint new data with correct type + for key in changed_values.keys(): + placeholder_item.data[key] = value + if cmds.attributeQuery(key, node=node_name, exists=True): + attribute = "{}.{}".format(node_name, key) + cmds.deleteAttr(attribute) - imprint(node_name, new_values) + self.imprint(node_name, changed_values) def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, placeholder_data in scene_placeholders.items(): - if placeholder_data.get("plugin_identifier") != self.identifier: - continue - + placeholders = [] + nodes_by_identifier = self._collect_scene_placeholders() + for node in nodes_by_identifier.get(self.identifier, []): # TODO do data validations and maybe upgrades if they are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) + placeholder_data = self.read(node) + placeholders.append( + self.item_class(scene_identifier=node, + data=placeholder_data, + plugin=self) ) - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) + return placeholders def post_placeholder_process(self, placeholder, failed): """Cleanup placeholder after load of its corresponding representations. + Hide placeholder, add them to placeholder set. + Used only by PlaceholderCreateMixin and PlaceholderLoadMixin + Args: placeholder (PlaceholderItem): Item which was just used to load representation. 
@@ -263,81 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): # Hide placeholder and add them to placeholder set node = placeholder.scene_identifier + # If we just populate the placeholders from current scene, the + # placeholder set will not be created so account for that. + if not cmds.objExists(PLACEHOLDER_SET): + cmds.sets(name=PLACEHOLDER_SET, empty=True) + cmds.sets(node, addElement=PLACEHOLDER_SET) cmds.hide(node) - cmds.setAttr(node + ".hiddenInOutliner", True) + cmds.setAttr("{}.hiddenInOutliner".format(node), True) def delete_placeholder(self, placeholder): - """Remove placeholder if building was successful""" - cmds.delete(placeholder.scene_identifier) + """Remove placeholder if building was successful - def load_succeed(self, placeholder, container): - self._parent_in_hierarchy(placeholder, container) - - def _parent_in_hierarchy(self, placeholder, container): - """Parent loaded container to placeholder's parent. - - ie : Set loaded content as placeholder's sibling - - Args: - container (str): Placeholder loaded containers + Used only by PlaceholderCreateMixin and PlaceholderLoadMixin. """ + node = placeholder.scene_identifier - if not container: - return + # To avoid Maya deleting any objectSets the node is a member of when + # the placeholder node itself is deleted, first remove it from all + # sets it belongs to. This way the `PLACEHOLDERS_SET` will survive + # the deletion. + sets = cmds.listSets(o=node) or [] + for object_set in sets: + cmds.sets(node, remove=object_set) - roots = cmds.sets(container, q=True) or [] - ref_node = None - try: - ref_node = get_reference_node(roots) - except AssertionError as e: - self.log.info(e.args[0]) + cmds.delete(node) - nodes_to_parent = [] - for root in roots: - if ref_node: - ref_root = cmds.referenceQuery(root, nodes=True)[0] - ref_root = ( - cmds.listRelatives(ref_root, parent=True, path=True) or - [ref_root] - ) - nodes_to_parent.extend(ref_root) - continue - if root.endswith("_RN"): - # Backwards compatibility for hardcoded reference names. - refRoot = cmds.referenceQuery(root, n=True)[0] - refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot] - nodes_to_parent.extend(refRoot) - elif root not in cmds.listSets(allSets=True): - nodes_to_parent.append(root) + def imprint(self, node, data): + """Imprint call for placeholder node""" - elif not cmds.sets(root, q=True): - return + # Complicated data that can't be represented as flat Maya attributes + # is written to JSON strings, e.g. a multiselection EnumDef + for key, value in data.items(): + if isinstance(value, (list, tuple, dict)): + data[key] = "JSON::{}".format(json.dumps(value)) - # Move loaded nodes to correct index in outliner hierarchy - placeholder_form = cmds.xform( - placeholder.scene_identifier, - q=True, - matrix=True, - worldSpace=True - ) - scene_parent = cmds.listRelatives( - placeholder.scene_identifier, parent=True, fullPath=True - ) - for node in set(nodes_to_parent): - cmds.reorder(node, front=True) - cmds.reorder(node, relative=placeholder.data["index"]) - cmds.xform(node, matrix=placeholder_form, ws=True) - if scene_parent: - cmds.parent(node, scene_parent) - else: - cmds.parent(node, world=True) + imprint(node, data) - holding_sets = cmds.listSets(object=placeholder.scene_identifier) - if not holding_sets: - return - for holding_set in holding_sets: - cmds.sets(roots, forceElement=holding_set) + def read(self, node): + """Read call for placeholder node""" + + data = read(node) + + # Complicated data that can't be represented as flat Maya attributes + # is read back from JSON strings, e.g. a multiselection EnumDef + for key, value in data.items(): + if isinstance(value, str) and value.startswith("JSON::"): + value = value[len("JSON::"):] # strip off the JSON:: prefix + data[key] = json.loads(value) + + return data def build_workfile_template(*args): diff --git a/client/ayon_core/hosts/maya/api/yeti.py b/client/ayon_core/hosts/maya/api/yeti.py new file mode 100644 index 0000000000..1526c3a2f3 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/yeti.py @@ -0,0 +1,101 @@ +from typing import List + +from maya import cmds + + +def get_yeti_user_variables(yeti_shape_node: str) -> List[str]: + """Get user defined yeti user variables for a `pgYetiMaya` shape node. + + Arguments: + yeti_shape_node (str): The `pgYetiMaya` shape node. + + Returns: + list: Attribute names (for a vector attribute it only lists the top + parent attribute, not the attribute per axis) + """ + + attrs = cmds.listAttr(yeti_shape_node, + userDefined=True, + string=("yetiVariableV_*", + "yetiVariableF_*")) or [] + valid_attrs = [] + for attr in attrs: + attr_type = cmds.attributeQuery(attr, node=yeti_shape_node, + attributeType=True) + if attr.startswith("yetiVariableV_") and attr_type == "double3": + # vector + valid_attrs.append(attr) + elif attr.startswith("yetiVariableF_") and attr_type == "double": + valid_attrs.append(attr) + + return valid_attrs + + +def create_yeti_variable(yeti_shape_node: str, + attr_name: str, + value=None, + force_value: bool = False) -> bool: + """Create a yeti user variable on a `pgYetiMaya` shape node. + + Arguments: + yeti_shape_node (str): The `pgYetiMaya` shape node. + attr_name (str): The fully qualified yeti variable name, e.g. + "yetiVariableF_myfloat" or "yetiVariableV_myvector" + value (object): The value to set (must match the type of the attribute) + When value is None it will be ignored and not set. + force_value (bool): Whether to set the value if the attribute already + exists or not. + + Returns: + bool: Whether the attribute value was set or not.
+ + """ + exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True) + if not exists: + if attr_name.startswith("yetiVariableV_"): + _create_vector_yeti_user_variable(yeti_shape_node, attr_name) + if attr_name.startswith("yetiVariableF_"): + _create_float_yeti_user_variable(yeti_shape_node, attr_name) + + if value is not None and (not exists or force_value): + plug = "{}.{}".format(yeti_shape_node, attr_name) + if ( + isinstance(value, (list, tuple)) + and attr_name.startswith("yetiVariableV_") + ): + cmds.setAttr(plug, *value, type="double3") + else: + cmds.setAttr(plug, value) + + return True + return False + + +def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str): + if not attr_name.startswith("yetiVariableV_"): + raise ValueError("Must start with yetiVariableV_") + cmds.addAttr(yeti_shape_node, + longName=attr_name, + attributeType="double3", + cachedInternally=True, + keyable=True) + for axis in "XYZ": + cmds.addAttr(yeti_shape_node, + longName="{}{}".format(attr_name, axis), + attributeType="double", + parent=attr_name, + cachedInternally=True, + keyable=True) + + +def _create_float_yeti_user_variable(yeti_node: str, attr_name: str): + if not attr_name.startswith("yetiVariableF_"): + raise ValueError("Must start with yetiVariableF_") + + cmds.addAttr(yeti_node, + longName=attr_name, + attributeType="double", + cachedInternally=True, + softMinValue=0, + softMaxValue=100, + keyable=True) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation.py b/client/ayon_core/hosts/maya/plugins/create/create_animation.py deleted file mode 100644 index f30d9aba81..0000000000 --- a/client/ayon_core/hosts/maya/plugins/create/create_animation.py +++ /dev/null @@ -1,89 +0,0 @@ -from ayon_core.hosts.maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreateAnimation(plugin.MayaHiddenCreator): - """Animation output for character rigs - - We hide the animation creator from the UI since the creation of it is - automated upon loading a rig. There's an inventory action to recreate it - for loaded rigs if by chance someone deleted the animation instance. 
- """ - identifier = "io.openpype.creators.maya.animation" - name = "animationDefault" - label = "Animation" - product_type = "animation" - icon = "male" - - write_color_sets = False - write_face_sets = False - include_parent_hierarchy = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("writeNormals", - label="Write normals", - tooltip="Write normals with the deforming geometry", - default=True), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=self.include_parent_hierarchy), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. - self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def apply_settings(self, project_settings): - super(CreateAnimation, self).apply_settings(project_settings) - # Hardcoding creator to be enabled due to existing settings would - # disable the creator causing the creator plugin to not be - # discoverable. - self.enabled = True diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py new file mode 100644 index 0000000000..069762e4ae --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py @@ -0,0 +1,138 @@ +from maya import cmds + +from ayon_core.hosts.maya.api import lib, plugin + +from ayon_core.lib import ( + BoolDef, + NumberDef, +) + + +def _get_animation_attr_defs(cls): + """Get Animation generic definitions.""" + defs = lib.collect_animation_defs() + defs.extend( + [ + BoolDef("farm", label="Submit to Farm"), + NumberDef("priority", label="Farm job Priority", default=50), + BoolDef("refresh", label="Refresh viewport during export"), + BoolDef( + "includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip=( + "Whether to include parent hierarchy of nodes in the " + "publish instance." + ) + ), + BoolDef( + "includeUserDefinedAttributes", + label="Include User Defined Attributes", + tooltip=( + "Whether to include all custom maya attributes found " + "on nodes as attributes in the Alembic data." + ) + ), + ] + ) + + return defs + + +def convert_legacy_alembic_creator_attributes(node_data, class_name): + """This is a legacy transfer of creator attributes to publish attributes + for ExtractAlembic/ExtractAnimation plugin. 
+ """ + publish_attributes = node_data["publish_attributes"] + + if class_name in publish_attributes: + return node_data + + attributes = [ + "attr", + "attrPrefix", + "visibleOnly", + "writeColorSets", + "writeFaceSets", + "writeNormals", + "renderableOnly", + "visibleOnly", + "worldSpace", + "renderableOnly" + ] + plugin_attributes = {} + for attr in attributes: + if attr not in node_data["creator_attributes"]: + continue + value = node_data["creator_attributes"].pop(attr) + + plugin_attributes[attr] = value + + publish_attributes[class_name] = plugin_attributes + + return node_data + + +class CreateAnimation(plugin.MayaHiddenCreator): + """Animation output for character rigs + + We hide the animation creator from the UI since the creation of it is + automated upon loading a rig. There's an inventory action to recreate it + for loaded rigs if by chance someone deleted the animation instance. + """ + + identifier = "io.openpype.creators.maya.animation" + name = "animationDefault" + label = "Animation" + product_type = "animation" + icon = "male" + + write_color_sets = False + write_face_sets = False + include_parent_hierarchy = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreateAnimation, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAnimation" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreateAnimation, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + +class CreatePointCache(plugin.MayaCreator): + """Alembic pointcache for animated data""" + + identifier = "io.openpype.creators.maya.pointcache" + label = "Pointcache" + product_type = "pointcache" + icon = "gears" + write_color_sets = False + write_face_sets = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreatePointCache, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAlembic" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreatePointCache, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + def create(self, product_name, instance_data, pre_create_data): + instance = super(CreatePointCache, self).create( + product_name, instance_data, pre_create_data + ) + instance_node = instance.get("instance_node") + + # For Arnold standin proxy + proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) + cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py index dc0ffb02c1..e321c13ca0 100644 --- a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -1,3 +1,5 @@ +from maya import cmds + from ayon_core.hosts.maya.api import ( lib, plugin @@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator): return defs + +class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource): + """Arnold Scene Source Proxy + + This product type facilitates working with proxy geometry in the viewport. 
+ """ + + identifier = "io.openpype.creators.maya.assproxy" + label = "Arnold Scene Source Proxy" + product_type = "assProxy" + icon = "cube" + def create(self, product_name, instance_data, pre_create_data): - - from maya import cmds - instance = super(CreateArnoldSceneSource, self).create( product_name, instance_data, pre_create_data ) instance_node = instance.get("instance_node") - content = cmds.sets(name=instance_node + "_content_SET", empty=True) proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets([content, proxy], forceElement=instance_node) + cmds.sets([proxy], forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py deleted file mode 100644 index 05e3a1a29f..0000000000 --- a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py +++ /dev/null @@ -1,88 +0,0 @@ -from maya import cmds - -from ayon_core.hosts.maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreatePointCache(plugin.MayaCreator): - """Alembic pointcache for animated data""" - - identifier = "io.openpype.creators.maya.pointcache" - label = "Pointcache" - product_type = "pointcache" - icon = "gears" - write_color_sets = False - write_face_sets = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=False), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=False), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("refresh", - label="Refresh viewport during export", - default=False), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - default="", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. 
- self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def create(self, product_name, instance_data, pre_create_data): - - instance = super(CreatePointCache, self).create( - product_name, instance_data, pre_create_data - ) - instance_node = instance.get("instance_node") - - # For Arnold standin proxy - proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py index 839a4dad90..5410546a2e 100644 --- a/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_geometry.py @@ -37,7 +37,7 @@ class ConnectGeometry(InventoryAction): repre_id = container["representation"] repre_context = repre_contexts_by_id[repre_id] - product_type = repre_context["prouct"]["productType"] + product_type = repre_context["product"]["productType"] containers_by_product_type.setdefault(product_type, []) containers_by_product_type[product_type].append(container) diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py index bf9e679928..166c419072 100644 --- a/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_xgen.py @@ -36,7 +36,7 @@ class ConnectXgen(InventoryAction): repre_id = container["representation"] repre_context = repre_contexts_by_id[repre_id] - product_type = repre_context["prouct"]["productType"] + product_type = repre_context["product"]["productType"] containers_by_product_type.setdefault(product_type, []) containers_by_product_type[product_type].append(container) diff --git a/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py index 5916bf7b97..8f13cc6ae5 100644 --- a/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py +++ b/client/ayon_core/hosts/maya/plugins/inventory/connect_yeti_rig.py @@ -39,7 +39,7 @@ class ConnectYetiRig(InventoryAction): repre_id = container["representation"] repre_context = repre_contexts_by_id[repre_id] - product_type = repre_context["prouct"]["productType"] + product_type = repre_context["product"]["productType"] containers_by_product_type.setdefault(product_type, []) containers_by_product_type[product_type].append(container) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py index 4b7d2f42ab..ae3b68965a 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py @@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import ( unique_namespace, get_attribute_input, maintained_selection, + get_fps_for_current_context ) from ayon_core.hosts.maya.api.pipeline import containerise from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type @@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin): """Load as Arnold standin""" product_types = { - "ass", "animation", "model", "proxyAbc", "pointcache", "usd" + "ass", + "assProxy", + "animation", + "model", + "proxyAbc", + "pointcache", + "usd" } representations = {"ass", "abc", "usda", "usdc", "usd"} @@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin): sequence = 
is_sequence(os.listdir(os.path.dirname(repre_path))) cmds.setAttr(standin_shape + ".useFrameExtension", sequence) - fps = float(version_attributes.get("fps")) or 25 - cmds.setAttr(standin_shape + ".abcFPS", fps) + fps = ( + version_attributes.get("fps") or get_fps_for_current_context() + ) + cmds.setAttr(standin_shape + ".abcFPS", float(fps)) nodes = [root, standin, standin_shape] if operator is not None: @@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin): proxy_path = "/".join([os.path.dirname(path), proxy_basename]) return proxy_basename, proxy_path + def _update_operators(self, string_replace_operator, proxy_basename, path): + cmds.setAttr( + string_replace_operator + ".match", + proxy_basename.split(".")[0], + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path).split(".")[0], + type="string" + ) + def _setup_proxy(self, shape, path, namespace): proxy_basename, proxy_path = self._get_proxy_path(path) @@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin): "*.(@node=='{}')".format(node_type), type="string" ) - cmds.setAttr( - string_replace_operator + ".match", - proxy_basename, - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path), - type="string" - ) + self._update_operators(string_replace_operator, proxy_basename, path) cmds.connectAttr( string_replace_operator + ".out", @@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin): path = get_representation_path(repre_entity) proxy_basename, proxy_path = self._get_proxy_path(path) - # Whether there is proxy or so, we still update the string operator. + # Whether there is proxy or not, we still update the string operator. # If no proxy exists, the string operator won't replace anything. 
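Note that the extracted `_update_operators` helper matches on basenames stripped of their extension (everything before the first dot), where the inline code it replaces used the full basename. With hypothetical paths:

import os

path = "/project/publish/char_hero.0001.ass"
proxy_basename = "char_hero_proxy.0001.ass"

match = proxy_basename.split(".")[0]             # "char_hero_proxy"
replace = os.path.basename(path).split(".")[0]   # "char_hero"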
- cmds.setAttr( - string_replace_operator + ".match", - proxy_basename, - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path), - type="string" - ) + self._update_operators(string_replace_operator, proxy_basename, path) dso_path = path if os.path.exists(proxy_path): diff --git a/client/ayon_core/hosts/maya/plugins/load/load_as_template.py b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py new file mode 100644 index 0000000000..f696d369e3 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py @@ -0,0 +1,39 @@ +from ayon_core.lib import ( + BoolDef +) +from ayon_core.pipeline import ( + load, + registered_host +) +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaTemplateBuilder +) + + +class LoadAsTemplate(load.LoaderPlugin): + """Load workfile as a template""" + + product_types = {"workfile", "mayaScene"} + label = "Load as template" + representations = ["ma", "mb"] + icon = "wrench" + color = "#775555" + order = 10 + + options = [ + BoolDef("keep_placeholders", + label="Keep Placeholders", + default=False), + BoolDef("create_first_version", + label="Create First Version", + default=False), + ] + + def load(self, context, name, namespace, data): + keep_placeholders = data.get("keep_placeholders", False) + create_first_version = data.get("create_first_version", False) + path = self.filepath_from_context(context) + builder = MayaTemplateBuilder(registered_host()) + builder.build_template(template_path=path, + keep_placeholders=keep_placeholders, + create_first_version=create_first_version) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_image.py b/client/ayon_core/hosts/maya/plugins/load/load_image.py index 5b0858ce70..171920f747 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_image.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_image.py @@ -8,7 +8,7 @@ from ayon_core.pipeline import ( from ayon_core.pipeline.load.utils import get_representation_path_from_context from ayon_core.pipeline.colorspace import ( get_imageio_file_rules_colorspace_from_filepath, - get_imageio_config, + get_current_context_imageio_config_preset, get_imageio_file_rules ) from ayon_core.settings import get_project_settings @@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin): host_name = get_current_host_name() project_settings = get_project_settings(project_name) - config_data = get_imageio_config( - project_name, host_name, + config_data = get_current_context_imageio_config_preset( project_settings=project_settings ) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py index caea6b7a72..4ca9ae9d03 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py @@ -12,6 +12,7 @@ from ayon_core.pipeline import ( get_representation_path ) from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.yeti import create_yeti_variable from ayon_core.hosts.maya.api.pipeline import containerise from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type @@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = { "viewportDensity", "viewportWidth", "viewportLength", + "renderDensity", + "renderWidth", + "renderLength", + "increaseRenderBounds" } +SKIP_ATTR_MESSAGE = ( + "Skipping updating %s.%s to %s because it " + "is considered a local overridable attribute. " + "Either set manually or load the cache " + "anew."
+) + def set_attribute(node, attr, value): """Wrapper of set attribute which ignores None values""" @@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin): for attr, value in node_settings["attrs"].items(): if attr in SKIP_UPDATE_ATTRS: + self.log.info( + SKIP_ATTR_MESSAGE, yeti_node, attr, value + ) continue set_attribute(attr, value, yeti_node) + # Set up user defined attributes + user_variables = node_settings.get("user_variables", {}) + for attr, value in user_variables.items(): + was_value_set = create_yeti_variable( + yeti_shape_node=yeti_node, + attr_name=attr, + value=value, + # We do not want to update the + # value if it already exists so + # that any local overrides that + # may have been applied still + # persist + force_value=False + ) + if not was_value_set: + self.log.info( + SKIP_ATTR_MESSAGE, yeti_node, attr, value + ) + cmds.setAttr("{}.representation".format(container_node), repre_entity["id"], typ="string") @@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin): for attr, value in attributes.items(): set_attribute(attr, value, yeti_node) + # Set up user defined attributes + user_variables = node_settings.get("user_variables", {}) + for attr, value in user_variables.items(): + create_yeti_variable(yeti_shape_node=yeti_node, + attr_name=attr, + value=value) + # Connect to the time node cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py index bf9525bae3..7444566ee1 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_rig.py @@ -1,8 +1,13 @@ +from typing import List + import maya.cmds as cmds from ayon_core.hosts.maya.api import plugin from ayon_core.hosts.maya.api import lib +from ayon_core.pipeline import registered_host +from ayon_core.pipeline.create import CreateContext + class YetiRigLoader(plugin.ReferenceLoader): """This loader will load Yeti rig.""" @@ -15,6 +20,9 @@ class YetiRigLoader(plugin.ReferenceLoader): icon = "code-fork" color = "orange" + # From settings + create_cache_instance_on_load = True + def process_reference( self, context, name=None, namespace=None, options=None ): @@ -49,4 +57,41 @@ class YetiRigLoader(plugin.ReferenceLoader): ) self[:] = nodes + if self.create_cache_instance_on_load: + # Automatically create an instance to allow publishing the loaded + # yeti rig into a yeti cache + self._create_yeti_cache_instance(nodes, variant=namespace) + return nodes + + def _create_yeti_cache_instance(self, nodes: List[str], variant: str): + """Create a yeticache product type instance to publish the output. + + This is similar to how loading an animation rig will automatically + create an animation instance for publishing any loaded character rigs, + but for yeti rigs instead. + + Args: + nodes (List[str]): Nodes generated on load. + variant (str): Variant for the yeti cache instance to create. + + """ + + # Find the roots amongst the loaded nodes + yeti_nodes = cmds.ls(nodes, type="pgYetiMaya", long=True) + assert yeti_nodes, "No pgYetiMaya nodes in rig, this is a bug."
+ + self.log.info("Creating variant: {}".format(variant)) + + creator_identifier = "io.openpype.creators.maya.yeticache" + + host = registered_host() + create_context = CreateContext(host) + + with lib.maintained_selection(): + cmds.select(yeti_nodes, noExpand=True) + create_context.create( + creator_identifier=creator_identifier, + variant=variant, + pre_create_data={"use_selection": True} + ) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py index 2ab6511ece..391c80c84e 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py @@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): if instance.data.get("farm"): instance.data["families"].append("publish.farm") - diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 0db89bee31..fb71e128eb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): # Offset to be after renderable camera collection. order = pyblish.api.CollectorOrder + 0.2 label = "Collect Arnold Scene Source" - families = ["ass"] + families = ["ass", "assProxy"] def process(self, instance): - objsets = instance.data["setMembers"] + instance.data["members"] = [] + for set_member in instance.data["setMembers"]: + if cmds.nodeType(set_member) != "objectSet": + instance.data["members"].extend(self.get_hierarchy(set_member)) + continue - for objset in objsets: - objset = str(objset) - members = cmds.sets(objset, query=True) + members = cmds.sets(set_member, query=True) members = cmds.ls(members, long=True) if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) + self.log.warning( + "Skipped empty instance: \"%s\" " % set_member + ) continue - if objset.endswith("content_SET"): - instance.data["contentMembers"] = self.get_hierarchy(members) - if objset.endswith("proxy_SET"): + if set_member.endswith("proxy_SET"): instance.data["proxy"] = self.get_hierarchy(members) # Use camera in object set if present else default to render globals @@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] if renderable: camera = renderable[0] - for node in instance.data["contentMembers"]: + for node in instance.data["members"]: camera_shapes = cmds.listRelatives( node, shapes=True, type="camera" ) @@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): self.log.debug("data: {}".format(instance.data)) def get_hierarchy(self, nodes): - """Return nodes with all their children. 
- - Arguments: - nodes (List[str]): List of nodes to collect children hierarchy for - - Returns: - list: Input nodes with their children hierarchy - - """ + """Return nodes with all their children""" nodes = cmds.ls(nodes, long=True) if not nodes: return [] - - children = get_all_children(nodes, ignore_intermediate_objects=True) - return list(children.union(nodes)) + children = get_all_children(nodes) + # Make sure nodes merged with children only + # contains unique entries + return list(set(nodes + list(children))) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py b/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py index 93b46c511b..60853bd1ee 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_file_dependencies.py @@ -12,7 +12,7 @@ class CollectFileDependencies(pyblish.api.ContextPlugin): families = ["renderlayer"] @classmethod - def apply_settings(cls, project_settings, system_settings): + def apply_settings(cls, project_settings): # Disable plug-in if not used for deadline submission anyway settings = project_settings["deadline"]["publish"]["MayaSubmitDeadline"] # noqa cls.enabled = settings.get("asset_dependencies", True) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py index 16fef2e168..3d586d48fb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py @@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin): def process(self, instance): # Collect user defined attributes. - if not instance.data.get("includeUserDefinedAttributes", False): + if not instance.data["creator_attributes"].get( + "includeUserDefinedAttributes" + ): return if "out_hierarchy" in instance.data: diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py index 067a7bc532..e1755e4212 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py @@ -3,6 +3,7 @@ from maya import cmds import pyblish.api from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables SETTINGS = { @@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin): - "increaseRenderBounds" - "imageSearchPath" - Other information is the name of the transform and it's Colorbleed ID + Other information is the name of the transform and its `cbId` """ order = pyblish.api.CollectorOrder + 0.45 @@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin): # Get specific node attributes attr_data = {} for attr in SETTINGS: + # Ignore non-existing attributes with a warning, e.g. 
cbId + # if they have not been generated yet + if not cmds.attributeQuery(attr, node=shape, exists=True): + self.log.warning( + "Attribute '{}' not found on Yeti node: {}".format( + attr, shape + ) + ) + continue + current = cmds.getAttr("%s.%s" % (shape, attr)) # change None to empty string as Maya doesn't support # NoneType in attributes @@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin): current = "" attr_data[attr] = current + # Get user variable attributes + user_variable_attrs = { + attr: lib.get_attribute("{}.{}".format(shape, attr)) + for attr in get_yeti_user_variables(shape) + } + # Get transform data parent = cmds.listRelatives(shape, parent=True)[0] transform_data = {"name": parent, "cbId": lib.get_id(parent)} @@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin): "name": shape, "cbId": lib.get_id(shape), "attrs": attr_data, + "user_variables": user_variable_attrs } settings["nodes"].append(shape_data) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py index ed8f2ad40c..fb4c41f1de 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor): families = ["ass"] asciiAss = False - def process(self, instance): - staging_dir = self.staging_dir(instance) + def _pre_process(self, instance, staging_dir): file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) # Mask @@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor): "mask": mask } - filenames, nodes_by_id = self._extract( - instance.data["contentMembers"], attribute_data, kwargs - ) - if "representations" not in instance.data: instance.data["representations"] = [] + return attribute_data, kwargs + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames = self._extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + def _post_process(self, instance, filenames, staging_dir, frame_start): + nodes_by_id = self._nodes_by_id(instance[:]) representation = { "name": "ass", "ext": "ass", "files": filenames if len(filenames) > 1 else filenames[0], "stagingDir": staging_dir, - "frameStart": kwargs["startFrame"] + "frameStart": frame_start } instance.data["representations"].append(representation) - json_path = os.path.join(staging_dir, "{}.json".format(instance.name)) + json_path = os.path.join( + staging_dir, "{}.json".format(instance.name) + ) with open(json_path, "w") as f: json.dump(nodes_by_id, f) @@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor): "Extracted instance {} to: {}".format(instance.name, staging_dir) ) - # Extract proxy. - if not instance.data.get("proxy", []): - return + def _nodes_by_id(self, nodes): + nodes_by_id = defaultdict(list) - kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + for node in nodes: + id = lib.get_id(node) - filenames, _ = self._extract( + if id is None: + continue + + # Converting Maya hierarchy separator "|" to Arnold separator "/". 
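For clarity, the `_nodes_by_id` grouping that the comment above describes reduces to a few lines of plain Python: nodes sharing a `cbId` collect under one key, and Maya's `|` DAG separator is rewritten to Arnold's `/` convention. The `get_id` lookup is stubbed here; the real one reads the `cbId` attribute through `ayon_core.hosts.maya.api.lib`:

```python
from collections import defaultdict


def nodes_by_id(nodes, get_id):
    """Group node paths by asset id, using Arnold's "/" separator."""
    grouped = defaultdict(list)
    for node in nodes:
        node_id = get_id(node)
        if node_id is None:
            continue  # nodes without a cbId are skipped entirely
        grouped[node_id].append(node.replace("|", "/"))
    return grouped


# Stubbed id lookup standing in for lib.get_id():
ids = {"|root|geo1": "abc123", "|root|geo2": "abc123"}
print(dict(nodes_by_id(ids, ids.get)))
# {'abc123': ['/root/geo1', '/root/geo2']}
```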
+ nodes_by_id[id].append(node.replace("|", "/")) + + return nodes_by_id + + def _extract(self, nodes, attribute_data, kwargs): + filenames = [] + with lib.attribute_values(attribute_data): + with lib.maintained_selection(): + self.log.debug( + "Writing: {}".format(nodes) + ) + cmds.select(nodes, noExpand=True) + + self.log.debug( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.debug("Exported: {}".format(filenames)) + + return filenames + + +class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source Proxy" + hosts = ["maya"] + families = ["assProxy"] + asciiAss = True + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames, _ = self._duplicate_extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + kwargs["filename"] = os.path.join( + staging_dir, "{}_proxy.ass".format(instance.name) + ) + + filenames, _ = self._duplicate_extract( instance.data["proxy"], attribute_data, kwargs ) @@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor): instance.data["representations"].append(representation) - def _extract(self, nodes, attribute_data, kwargs): + def _duplicate_extract(self, nodes, attribute_data, kwargs): self.log.debug( "Writing {} with:\n{}".format(kwargs["filename"], kwargs) ) filenames = [] - nodes_by_id = defaultdict(list) # Duplicating nodes so they are direct children of the world. This # makes the hierarchy of any exported ass file the same. with lib.delete_after() as delete_bin: @@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor): if not shapes: continue - duplicate_transform = cmds.duplicate(node)[0] + basename = cmds.duplicate(node)[0] + parents = cmds.ls(node, long=True)[0].split("|")[:-1] + duplicate_transform = "|".join(parents + [basename]) if cmds.listRelatives(duplicate_transform, parent=True): duplicate_transform = cmds.parent( @@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor): duplicate_nodes.extend(shapes) delete_bin.append(duplicate_transform) - # Copy cbId to mtoa_constant. - for node in duplicate_nodes: - # Converting Maya hierarchy separator "|" to Arnold - # separator "/". 
- nodes_by_id[lib.get_id(node)].append(node.replace("|", "/")) - - with lib.attribute_values(attribute_data): - with lib.maintained_selection(): - self.log.debug( - "Writing: {}".format(duplicate_nodes) - ) - cmds.select(duplicate_nodes, noExpand=True) - - self.log.debug( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.debug("Exported: {}".format(filenames)) + nodes_by_id = self._nodes_by_id(duplicate_nodes) + filenames = self._extract(duplicate_nodes, attribute_data, kwargs) return filenames, nodes_by_id diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py index 2c23f9b752..5f51dc38cb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py @@ -2,7 +2,7 @@ import os import json from ayon_core.pipeline import publish -from ayon_core.hosts.maya.api.lib import extract_alembic +from ayon_core.hosts.maya.api.alembic import extract_alembic from maya import cmds diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py index ee66ed2fb7..77b5b79b5f 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py @@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor): fbx_exporter = fbx.FBXExtractor(log=self.log) out_members = instance.data.get("animated_skeleton", []) # Export - instance.data["constraints"] = True + # TODO: need to set up the options for users to set up + # the flags they intended to export instance.data["skeletonDefinitions"] = True instance.data["referencedAssetsContent"] = True fbx_exporter.set_options_from_instance(instance) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py index 5de72f7674..cc930e49cc 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py @@ -1,17 +1,29 @@ import os +from collections import OrderedDict from maya import cmds from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.hosts.maya.api.lib import ( - extract_alembic, + get_all_children, suspended_refresh, maintained_selection, iter_visible_nodes_in_range ) +from ayon_core.lib import ( + BoolDef, + TextDef, + NumberDef, + EnumDef, + UISeparatorDef, + UILabelDef, +) +from ayon_core.pipeline.publish import AYONPyblishPluginMixin +from ayon_core.pipeline import KnownPublishError -class ExtractAlembic(publish.Extractor): +class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin): """Produce an alembic of just point positions and normals. 
Positions and normals, uvs, creases are preserved, but nothing more, @@ -27,8 +39,35 @@ class ExtractAlembic(publish.Extractor): targets = ["local", "remote"] # From settings + attr = [] + attrPrefix = [] bake_attributes = [] bake_attribute_prefixes = [] + dataFormat = "ogawa" + eulerFilter = False + melPerFrameCallback = "" + melPostJobCallback = "" + overrides = [] + preRoll = False + preRollStartFrame = 0 + pythonPerFrameCallback = "" + pythonPostJobCallback = "" + renderableOnly = False + stripNamespaces = True + uvsOnly = False + uvWrite = False + userAttr = "" + userAttrPrefix = "" + verbose = False + visibleOnly = False + wholeFrameGeo = False + worldSpace = True + writeColorSets = False + writeCreases = False + writeFaceSets = False + writeNormals = True + writeUVSets = False + writeVisibility = False def process(self, instance): if instance.data.get("farm"): @@ -41,16 +80,38 @@ class ExtractAlembic(publish.Extractor): start = float(instance.data.get("frameStartHandle", 1)) end = float(instance.data.get("frameEndHandle", 1)) - attrs = instance.data.get("attr", "").split(";") - attrs = [value for value in attrs if value.strip()] + attribute_values = self.get_attr_values_from_data( + instance.data + ) + + attrs = [ + attr.strip() + for attr in attribute_values.get("attr", "").split(";") + if attr.strip() + ] attrs += instance.data.get("userDefinedAttributes", []) attrs += self.bake_attributes attrs += ["cbId"] - attr_prefixes = instance.data.get("attrPrefix", "").split(";") - attr_prefixes = [value for value in attr_prefixes if value.strip()] + attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("attrPrefix", "").split(";") + if attr.strip() + ] attr_prefixes += self.bake_attribute_prefixes + user_attrs = [ + attr.strip() + for attr in attribute_values.get("userAttr", "").split(";") + if attr.strip() + ] + + user_attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("userAttrPrefix", "").split(";") + if attr.strip() + ] + self.log.debug("Extracting pointcache..") dirname = self.staging_dir(instance) @@ -58,28 +119,82 @@ class ExtractAlembic(publish.Extractor): filename = "{name}.abc".format(**instance.data) path = os.path.join(parent_dir, filename) - options = { - "step": instance.data.get("step", 1.0), - "attr": attrs, - "attrPrefix": attr_prefixes, - "writeVisibility": True, - "writeCreases": True, - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False), - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True) - } - + root = None if not instance.data.get("includeParentHierarchy", True): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = roots + root = roots - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. 
-            options["writeUVSets"] = True
+        kwargs = {
+            "file": path,
+            "attr": attrs,
+            "attrPrefix": attr_prefixes,
+            "userAttr": user_attrs,
+            "userAttrPrefix": user_attr_prefixes,
+            "dataFormat": attribute_values.get("dataFormat", self.dataFormat),
+            "endFrame": end,
+            "eulerFilter": attribute_values.get(
+                "eulerFilter", self.eulerFilter
+            ),
+            "preRoll": attribute_values.get("preRoll", self.preRoll),
+            "preRollStartFrame": attribute_values.get(
+                "preRollStartFrame", self.preRollStartFrame
+            ),
+            "renderableOnly": attribute_values.get(
+                "renderableOnly", self.renderableOnly
+            ),
+            "root": root,
+            "selection": True,
+            "startFrame": start,
+            "step": instance.data.get(
+                "creator_attributes", {}
+            ).get("step", 1.0),
+            "stripNamespaces": attribute_values.get(
+                "stripNamespaces", self.stripNamespaces
+            ),
+            "uvWrite": attribute_values.get("uvWrite", self.uvWrite),
+            "verbose": attribute_values.get("verbose", self.verbose),
+            "wholeFrameGeo": attribute_values.get(
+                "wholeFrameGeo", self.wholeFrameGeo
+            ),
+            "worldSpace": attribute_values.get("worldSpace", self.worldSpace),
+            "writeColorSets": attribute_values.get(
+                "writeColorSets", self.writeColorSets
+            ),
+            "writeCreases": attribute_values.get(
+                "writeCreases", self.writeCreases
+            ),
+            "writeFaceSets": attribute_values.get(
+                "writeFaceSets", self.writeFaceSets
+            ),
+            "writeUVSets": attribute_values.get(
+                "writeUVSets", self.writeUVSets
+            ),
+            "writeVisibility": attribute_values.get(
+                "writeVisibility", self.writeVisibility
+            ),
+            "uvsOnly": attribute_values.get(
+                "uvsOnly", self.uvsOnly
+            ),
+            "melPerFrameCallback": attribute_values.get(
+                "melPerFrameCallback", self.melPerFrameCallback
+            ),
+            "melPostJobCallback": attribute_values.get(
+                "melPostJobCallback", self.melPostJobCallback
+            ),
+            "pythonPerFrameCallback": attribute_values.get(
+                "pythonPerFrameCallback", self.pythonPerFrameCallback
+            ),
+            "pythonPostJobCallback": attribute_values.get(
+                "pythonPostJobCallback", self.pythonPostJobCallback
+            ),
+            # Note that this converts `writeNormals` to `noNormals` for the
+            # `AbcExport` equivalent in `extract_alembic`
+            "noNormals": not attribute_values.get(
+                "writeNormals", self.writeNormals
+            ),
+        }

         if instance.data.get("visibleOnly", False):
             # If we only want to include nodes that are visible in the frame
@@ -87,20 +202,19 @@
             # flag does not filter out those that are only hidden on some
             # frames as it counts "animated" or "connected" visibilities as
             # if it's always visible.
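The `attr`, `attrPrefix`, `userAttr` and `userAttrPrefix` publish attributes collected above are all parsed with the same semicolon-splitting idiom. A minimal illustration of that parsing rule, as a self-contained sketch:

```python
def split_semicolon_list(value):
    """Split a semicolon separated string, dropping empty entries."""
    return [part.strip() for part in value.split(";") if part.strip()]


assert split_semicolon_list("cbId; mtoa_constant_x;; ") == [
    "cbId", "mtoa_constant_x"
]
```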
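Each entry in the `kwargs` dictionary above resolves the same way: an artist override from the publisher attributes wins, otherwise the settings-driven class default applies. A dependency-free sketch of that resolution rule (class and function names here are illustrative, not the plug-in's API):

```python
class ExporterDefaults:
    # Stand-ins for the settings-driven class attributes
    eulerFilter = False
    stripNamespaces = True


def resolve(attribute_values, name, defaults=ExporterDefaults):
    """Publisher override if present, else the class default."""
    return attribute_values.get(name, getattr(defaults, name))


print(resolve({}, "stripNamespaces"))                          # True
print(resolve({"stripNamespaces": False}, "stripNamespaces"))  # False
```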
- nodes = list(iter_visible_nodes_in_range(nodes, - start=start, - end=end)) + nodes = list( + iter_visible_nodes_in_range(nodes, start=start, end=end) + ) suspend = not instance.data.get("refresh", False) with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(nodes, noExpand=True) - extract_alembic( - file=path, - startFrame=start, - endFrame=end, - **options + self.log.debug( + "Running `extract_alembic` with the keyword arguments: " + "{}".format(kwargs) ) + extract_alembic(**kwargs) if "representations" not in instance.data: instance.data["representations"] = [] @@ -124,22 +238,17 @@ class ExtractAlembic(publish.Extractor): return path = path.replace(".abc", "_proxy.abc") + kwargs["file"] = path if not instance.data.get("includeParentHierarchy", True): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = instance.data["proxyRoots"] + kwargs["root"] = instance.data["proxyRoots"] with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(instance.data["proxy"]) - extract_alembic( - file=path, - startFrame=start, - endFrame=end, - **options - ) - + extract_alembic(**kwargs) representation = { "name": "proxy", "ext": "abc", @@ -152,24 +261,265 @@ class ExtractAlembic(publish.Extractor): def get_members_and_roots(self, instance): return instance[:], instance.data.get("setMembers") + @classmethod + def get_attribute_defs(cls): + if not cls.overrides: + return [] + + override_defs = OrderedDict({ + "eulerFilter": BoolDef( + "eulerFilter", + label="Euler Filter", + default=cls.eulerFilter, + tooltip="Apply Euler filter while sampling rotations." + ), + "renderableOnly": BoolDef( + "renderableOnly", + label="Renderable Only", + default=cls.renderableOnly, + tooltip="Only export renderable visible shapes." + ), + "stripNamespaces": BoolDef( + "stripNamespaces", + label="Strip Namespaces", + default=cls.stripNamespaces, + tooltip=( + "Namespaces will be stripped off of the node before being " + "written to Alembic." + ) + ), + "uvsOnly": BoolDef( + "uvsOnly", + label="UVs Only", + default=cls.uvsOnly, + tooltip=( + "If this flag is present, only uv data for PolyMesh and " + "SubD shapes will be written to the Alembic file." + ) + ), + "uvWrite": BoolDef( + "uvWrite", + label="UV Write", + default=cls.uvWrite, + tooltip=( + "Uv data for PolyMesh and SubD shapes will be written to " + "the Alembic file." + ) + ), + "verbose": BoolDef( + "verbose", + label="Verbose", + default=cls.verbose, + tooltip="Prints the current frame that is being evaluated." + ), + "visibleOnly": BoolDef( + "visibleOnly", + label="Visible Only", + default=cls.visibleOnly, + tooltip="Only export dag objects visible during frame range." + ), + "wholeFrameGeo": BoolDef( + "wholeFrameGeo", + label="Whole Frame Geo", + default=cls.wholeFrameGeo, + tooltip=( + "Data for geometry will only be written out on whole " + "frames." + ) + ), + "worldSpace": BoolDef( + "worldSpace", + label="World Space", + default=cls.worldSpace, + tooltip="Any root nodes will be stored in world space." + ), + "writeColorSets": BoolDef( + "writeColorSets", + label="Write Color Sets", + default=cls.writeColorSets, + tooltip="Write vertex colors with the geometry." + ), + "writeCreases": BoolDef( + "writeCreases", + label="Write Creases", + default=cls.writeCreases, + tooltip="Write the geometry's edge and vertex crease " + "information." 
+ ), + "writeFaceSets": BoolDef( + "writeFaceSets", + label="Write Face Sets", + default=cls.writeFaceSets, + tooltip="Write face sets with the geometry." + ), + "writeNormals": BoolDef( + "writeNormals", + label="Write Normals", + default=cls.writeNormals, + tooltip="Write normals with the deforming geometry." + ), + "writeUVSets": BoolDef( + "writeUVSets", + label="Write UV Sets", + default=cls.writeUVSets, + tooltip=( + "Write all uv sets on MFnMeshes as vector 2 indexed " + "geometry parameters with face varying scope." + ) + ), + "writeVisibility": BoolDef( + "writeVisibility", + label="Write Visibility", + default=cls.writeVisibility, + tooltip=( + "Visibility state will be stored in the Alembic file. " + "Otherwise everything written out is treated as visible." + ) + ), + "preRoll": BoolDef( + "preRoll", + label="Pre Roll", + default=cls.preRoll, + tooltip="This frame range will not be sampled." + ), + "preRollStartFrame": NumberDef( + "preRollStartFrame", + label="Pre Roll Start Frame", + tooltip=( + "The frame to start scene evaluation at. This is used" + " to set the starting frame for time dependent " + "translations and can be used to evaluate run-up that" + " isn't actually translated." + ), + default=cls.preRollStartFrame + ), + "dataFormat": EnumDef( + "dataFormat", + label="Data Format", + items=["ogawa", "HDF"], + default=cls.dataFormat, + tooltip="The data format to use to write the file." + ), + "attr": TextDef( + "attr", + label="Custom Attributes", + placeholder="attr1; attr2; ...", + default=cls.attr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "attrPrefix": TextDef( + "attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.attrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttr": TextDef( + "userAttr", + label="User Attr", + placeholder="attr1; attr2; ...", + default=cls.userAttr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttrPrefix": TextDef( + "userAttrPrefix", + label="User Attr Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.userAttrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "melPerFrameCallback": TextDef( + "melPerFrameCallback", + label="Mel Per Frame Callback", + default=cls.melPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a Mel command." + ) + ), + "melPostJobCallback": TextDef( + "melPostJobCallback", + label="Mel Post Job Callback", + default=cls.melPostJobCallback, + tooltip=( + "When the translation has finished the string specified " + "is evaluated as a Mel command." + ) + ), + "pythonPerFrameCallback": TextDef( + "pythonPerFrameCallback", + label="Python Per Frame Callback", + default=cls.pythonPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a python command." 
+ ) + ), + "pythonPostJobCallback": TextDef( + "pythonPostJobCallback", + label="Python Post Frame Callback", + default=cls.pythonPostJobCallback, + tooltip=( + "When the translation has finished the string specified " + "is evaluated as a python command." + ) + ) + }) + + defs = super(ExtractAlembic, cls).get_attribute_defs() + + defs.extend([ + UISeparatorDef("sep_alembic_options"), + UILabelDef("Alembic Options"), + ]) + + # The Arguments that can be modified by the Publisher + overrides = set(cls.overrides) + for key, value in override_defs.items(): + if key not in overrides: + continue + + defs.append(value) + + defs.append( + UISeparatorDef("sep_alembic_options_end") + ) + + return defs + class ExtractAnimation(ExtractAlembic): - label = "Extract Animation" + label = "Extract Animation (Alembic)" families = ["animation"] def get_members_and_roots(self, instance): - # Collect the out set nodes out_sets = [node for node in instance if node.endswith("out_SET")] if len(out_sets) != 1: - raise RuntimeError("Couldn't find exactly one out_SET: " - "{0}".format(out_sets)) + raise KnownPublishError( + "Couldn't find exactly one out_SET: {0}".format(out_sets) + ) out_set = out_sets[0] - roots = cmds.sets(out_set, query=True) + roots = cmds.sets(out_set, query=True) or [] # Include all descendants - nodes = roots + cmds.listRelatives(roots, - allDescendents=True, - fullPath=True) or [] + nodes = roots.copy() + nodes.extend(get_all_children(roots, ignore_intermediate_objects=True)) return nodes, roots diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py index 3637a58614..5aefdfc33a 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py @@ -3,8 +3,8 @@ import os from maya import cmds from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.hosts.maya.api.lib import ( - extract_alembic, suspended_refresh, maintained_selection, iter_visible_nodes_in_range diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py index 9286869c60..66dd805437 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -5,7 +5,13 @@ import os from maya import cmds from ayon_core.pipeline import publish -from ayon_core.hosts.maya.api.lib import maintained_selection +from ayon_core.hosts.maya.api.lib import ( + maintained_selection, + renderlayer +) +from ayon_core.hosts.maya.api.render_setup_tools import ( + allow_export_from_render_setup_layer +) class ExtractRedshiftProxy(publish.Extractor): @@ -18,6 +24,9 @@ class ExtractRedshiftProxy(publish.Extractor): def process(self, instance): """Extractor entry point.""" + # Make sure Redshift is loaded + cmds.loadPlugin("redshift4maya", quiet=True) + staging_dir = self.staging_dir(instance) file_name = "{}.rs".format(instance.name) file_path = os.path.join(staging_dir, file_name) @@ -60,14 +69,22 @@ class ExtractRedshiftProxy(publish.Extractor): # Write out rs file self.log.debug("Writing: '%s'" % file_path) + + # Allow overriding what renderlayer to export from. By default force + # it to the default render layer. 
(Note that the renderlayer isn't + # currently exposed as an attribute to artists) + layer = instance.data.get("renderLayer", "defaultRenderLayer") + with maintained_selection(): - cmds.select(instance.data["setMembers"], noExpand=True) - cmds.file(file_path, - pr=False, - force=True, - type="Redshift Proxy", - exportSelected=True, - options=rs_options) + with renderlayer(layer): + with allow_export_from_render_setup_layer(): + cmds.select(instance.data["setMembers"], noExpand=True) + cmds.file(file_path, + preserveReferences=False, + force=True, + type="Redshift Proxy", + exportSelected=True, + options=rs_options) if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py index 1a389f3d33..b5cc7745a1 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py @@ -5,8 +5,8 @@ import os from maya import cmds # noqa from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.hosts.maya.api.lib import ( - extract_alembic, suspended_refresh, maintained_selection ) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py index d799486184..54d295b479 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py @@ -5,7 +5,7 @@ import copy from maya import cmds import pyblish.api -from ayon_core.hosts.maya.api.lib import extract_alembic +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.pipeline import publish diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py new file mode 100644 index 0000000000..11f4c313fa --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py @@ -0,0 +1,130 @@ +import inspect +import pyblish.api + +from ayon_core.pipeline import OptionalPyblishPluginMixin +from ayon_core.pipeline.publish import RepairAction, PublishValidationError + + +class ValidateAlembicDefaultsPointcache( + pyblish.api.InstancePlugin, OptionalPyblishPluginMixin +): + """Validate the attributes on the instance are defaults. + + The defaults are defined in the project settings. 
+ """ + + order = pyblish.api.ValidatorOrder + families = ["pointcache"] + hosts = ["maya"] + label = "Validate Alembic Options Defaults" + actions = [RepairAction] + optional = True + + plugin_name = "ExtractAlembic" + + @classmethod + def _get_settings(cls, context): + maya_settings = context.data["project_settings"]["maya"] + settings = maya_settings["publish"]["ExtractAlembic"] + return settings + + @classmethod + def _get_publish_attributes(cls, instance): + return instance.data["publish_attributes"][cls.plugin_name] + + def process(self, instance): + if not self.is_active(instance.data): + return + + settings = self._get_settings(instance.context) + attributes = self._get_publish_attributes(instance) + + invalid = {} + for key, value in attributes.items(): + if key not in settings: + # This may occur if attributes have changed over time and an + # existing instance has older legacy attributes that do not + # match the current settings definition. + self.log.warning( + "Publish attribute %s not found in Alembic Export " + "default settings. Ignoring validation for attribute.", + key + ) + continue + + default_value = settings[key] + + # Lists are best to compared sorted since we cant rely on the order + # of the items. + if isinstance(value, list): + value = sorted(value) + default_value = sorted(default_value) + + if value != default_value: + invalid[key] = value, default_value + + if invalid: + non_defaults = "\n".join( + f"- {key}: {value} \t(default: {default_value})" + for key, (value, default_value) in invalid.items() + ) + + raise PublishValidationError( + "Alembic extract options differ from default values:\n" + f"{non_defaults}", + description=self.get_description() + ) + + @staticmethod + def get_description(): + return inspect.cleandoc( + """### Alembic Extract settings differ from defaults + + The alembic export options differ from the project default values. + + If this is intentional you can disable this validation by + disabling **Validate Alembic Options Default**. + + If not you may use the "Repair" action to revert all the options to + their default values. + + """ + ) + + @classmethod + def repair(cls, instance): + # Find create instance twin. + create_context = instance.context.data["create_context"] + create_instance = create_context.get_instance_by_id( + instance.data["instance_id"] + ) + + # Set the settings values on the create context then save to workfile. + settings = cls._get_settings(instance.context) + attributes = cls._get_publish_attributes(create_instance) + for key in attributes: + if key not in settings: + # This may occur if attributes have changed over time and an + # existing instance has older legacy attributes that do not + # match the current settings definition. + cls.log.warning( + "Publish attribute %s not found in Alembic Export " + "default settings. Ignoring repair for attribute.", + key + ) + continue + attributes[key] = settings[key] + + create_context.save_changes() + + +class ValidateAlembicDefaultsAnimation( + ValidateAlembicDefaultsPointcache +): + """Validate the attributes on the instance are defaults. + + The defaults are defined in the project settings. 
+ """ + label = "Validate Alembic Options Defaults" + families = ["animation"] + plugin_name = "ExtractAnimation" diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py b/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py deleted file mode 100644 index 2ba2bff6fc..0000000000 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py +++ /dev/null @@ -1,71 +0,0 @@ -import pyblish.api -import ayon_core.hosts.maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, - OptionalPyblishPluginMixin -) -from maya import cmds - - -class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validate all nodes in skeletonAnim_SET are referenced""" - - order = ValidateContentsOrder - hosts = ["maya"] - families = ["animation.fbx"] - label = "Animated Reference Rig" - accepted_controllers = ["transform", "locator"] - actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - animated_sets = instance.data.get("animated_skeleton", []) - if not animated_sets: - self.log.debug( - "No nodes found in skeletonAnim_SET. " - "Skipping validation of animated reference rig..." - ) - return - - for animated_reference in animated_sets: - is_referenced = cmds.referenceQuery( - animated_reference, isNodeReferenced=True) - if not bool(is_referenced): - raise PublishValidationError( - "All the content in skeletonAnim_SET" - " should be referenced nodes" - ) - invalid_controls = self.validate_controls(animated_sets) - if invalid_controls: - raise PublishValidationError( - "All the content in skeletonAnim_SET" - " should be transforms" - ) - - @classmethod - def validate_controls(self, set_members): - """Check if the controller set contains only accepted node types. - - Checks if all its set members are within the hierarchy of the root - Checks if the node types of the set members valid - - Args: - set_members: list of nodes of the skeleton_anim_set - hierarchy: list of nodes which reside under the root node - - Returns: - errors (list) - """ - - # Validate control types - invalid = [] - set_members = cmds.ls(set_members, long=True) - for node in set_members: - if cmds.nodeType(node) not in self.accepted_controllers: - invalid.append(node) - - return invalid diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py index 92b4922492..8574b3ecc8 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -1,30 +1,56 @@ +from maya import cmds + import pyblish.api + from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) +from ayon_core.hosts.maya.api.lib import is_visible class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): """Validate Arnold Scene Source. - We require at least 1 root node/parent for the meshes. This is to ensure we - can duplicate the nodes and preserve the names. + Ensure no nodes are hidden. 
+ """ - If using proxies we need the nodes to share the same names and not be + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass", "assProxy"] + label = "Validate Arnold Scene Source" + + def process(self, instance): + # Validate against having nodes hidden, which will result in the + # extraction to ignore the node. + nodes = instance.data["members"] + instance.data.get("proxy", []) + nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')] + hidden_nodes = [ + x for x in nodes if not is_visible(x, intermediateObject=False) + ] + if hidden_nodes: + raise PublishValidationError( + "Found hidden nodes:\n\n{}\n\nPlease unhide for" + " publishing.".format("\n".join(hidden_nodes)) + ) + + +class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source Proxy. + + When using proxies we need the nodes to share the same names and not be parent to the world. This ends up needing at least two groups with content nodes and proxy nodes in another. """ order = ValidateContentsOrder hosts = ["maya"] - families = ["ass"] - label = "Validate Arnold Scene Source" + families = ["assProxy"] + label = "Validate Arnold Scene Source Proxy" def _get_nodes_by_name(self, nodes): ungrouped_nodes = [] nodes_by_name = {} parents = [] - same_named_nodes = {} for node in nodes: node_split = node.split("|") if len(node_split) == 2: @@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): parents.append(parent) node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - - # Check for same same nodes, which can happen in different - # hierarchies. - if node_name in nodes_by_name: - try: - same_named_nodes[node_name].append(node) - except KeyError: - same_named_nodes[node_name] = [ - nodes_by_name[node_name], node - ] - nodes_by_name[node_name] = node - if same_named_nodes: - message = "Found nodes with the same name:" - for name, nodes in same_named_nodes.items(): - message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes)) - - raise PublishValidationError(message) - return ungrouped_nodes, nodes_by_name, parents def process(self, instance): + # Validate against nodes directly parented to world. ungrouped_nodes = [] nodes, content_nodes_by_name, content_parents = ( - self._get_nodes_by_name(instance.data["contentMembers"]) + self._get_nodes_by_name(instance.data["members"]) ) ungrouped_nodes.extend(nodes) @@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): ) ungrouped_nodes.extend(nodes) - # Validate against nodes directly parented to world. if ungrouped_nodes: raise PublishValidationError( "Found nodes parented to the world: {}\n" "All nodes need to be grouped.".format(ungrouped_nodes) ) - # Proxy validation. - if not instance.data.get("proxy", []): - return - # Validate for content and proxy nodes amount being the same. 
-        if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
+        if len(instance.data["members"]) != len(instance.data["proxy"]):
             raise PublishValidationError(
                 "Amount of content nodes ({}) and proxy nodes ({}) needs to "
-                "be the same.".format(
-                    len(instance.data["contentMembers"]),
-                    len(instance.data["proxy"])
+                "be the same.\nContent nodes: {}\nProxy nodes: {}".format(
+                    len(instance.data["members"]),
+                    len(instance.data["proxy"]),
+                    instance.data["members"],
+                    instance.data["proxy"]
                 )
             )
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
index a9d896952d..e5dbe178fc 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
@@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
 
     order = ValidateContentsOrder
     hosts = ["maya"]
-    families = ["ass"]
+    families = ["assProxy"]
     label = "Validate Arnold Scene Source CBID"
     actions = [RepairAction]
     optional = False
@@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
 
     @classmethod
     def get_invalid_couples(cls, instance):
-        content_nodes_by_name = cls._get_nodes_by_name(
-            instance.data["contentMembers"]
-        )
-        proxy_nodes_by_name = cls._get_nodes_by_name(
-            instance.data.get("proxy", [])
-        )
+        nodes_by_name = cls._get_nodes_by_name(instance.data["members"])
+        proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"])
 
         invalid_couples = []
-        for content_name, content_node in content_nodes_by_name.items():
+        for content_name, content_node in nodes_by_name.items():
             proxy_node = proxy_nodes_by_name.get(content_name, None)
 
             if not proxy_node:
@@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
         if not self.is_active(instance.data):
             return
         # Proxy validation.
-        if not instance.data.get("proxy", []):
+        if not instance.data["proxy"]:
             return
 
         # Validate for proxy nodes sharing the same cbId as content nodes.
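Both Arnold scene source validators above match content and proxy nodes by their short name with any namespace stripped, which is why the two hierarchies must line up one to one. A self-contained sketch of that pairing, using hypothetical node paths:

```python
def nodes_by_short_name(nodes):
    """Map short node name (namespace stripped) to the full DAG path."""
    return {
        node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]: node
        for node in nodes
    }


content = ["|content_GRP|body", "|content_GRP|hair"]
proxy = ["|proxy_GRP|body", "|proxy_GRP|hair"]

assert len(content) == len(proxy), "Content/proxy counts must match"
proxy_by_name = nodes_by_short_name(proxy)
pairs = {
    name: (node, proxy_by_name.get(name))
    for name, node in nodes_by_short_name(content).items()
}
print(pairs["body"])  # ('|content_GRP|body', '|proxy_GRP|body')
```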
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py index 78a247b3f2..7badfdc027 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py @@ -10,6 +10,7 @@ from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError, + OptionalPyblishPluginMixin ) from ayon_core.hosts.maya.api import lib from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings @@ -37,7 +38,8 @@ def get_redshift_image_format_labels(): return mel.eval("{0}={0}".format(var)) -class ValidateRenderSettings(pyblish.api.InstancePlugin): +class ValidateRenderSettings(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates the global render settings * File Name Prefix must start with: `` @@ -55,7 +57,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): * Frame Padding must be: * default: 4 - * Animation must be toggle on, in Render Settings - Common tab: + * Animation must be toggled on, in Render Settings - Common tab: * vray: Animation on standard of specific * arnold: Frame / Animation ext: Any choice without "(Single Frame)" * redshift: Animation toggled on @@ -67,10 +69,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): """ order = ValidateContentsOrder - label = "Render Settings" + label = "Validate Render Settings" hosts = ["maya"] families = ["renderlayer"] actions = [RepairAction] + optional = True ImagePrefixes = { 'mentalray': 'defaultRenderGlobals.imageFilePrefix', @@ -112,6 +115,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): DEFAULT_PREFIX = "//_" def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..b07c7e9a70 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,132 @@ +from maya import cmds + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + PlaceholderLoadMixin, + LoadPlaceholderItem +) +from ayon_core.hosts.maya.api.lib import ( + get_container_transforms, + get_node_parent, + get_node_index_under_parent +) +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin, +) + + +class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "maya.load" + label = "Maya load" + + item_class = LoadPlaceholderItem + + def _create_placeholder_name(self, placeholder_data): + + # Split builder type: context_assets, linked_assets, all_assets + prefix, suffix = placeholder_data["builder_type"].split("_", 1) + parts = [prefix] + + # add family if any + placeholder_product_type = placeholder_data.get("product_type") + if placeholder_product_type is None: + placeholder_product_type = placeholder_data.get("family") + + if placeholder_product_type: + parts.append(placeholder_product_type) + + # add loader arguments if any + loader_args = placeholder_data["loader_args"] + if loader_args: + loader_args = eval(loader_args) + for value in loader_args.values(): + parts.append(str(value)) + + parts.append(suffix) + placeholder_name = "_".join(parts) + + return placeholder_name.capitalize() + + def _get_loaded_repre_ids(self): + 
loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + try: + containers = cmds.sets("AVALON_CONTAINERS", q=True) + except ValueError: + containers = [] + + loaded_representation_ids = { + cmds.getAttr(container + ".representation") + for container in containers + } + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def populate_placeholder(self, placeholder): + self.populate_load_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + repre_ids = self._get_loaded_repre_ids() + self.populate_load_placeholder(placeholder, repre_ids) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + self._parent_in_hierarchy(placeholder, container) + + def _parent_in_hierarchy(self, placeholder, container): + """Parent loaded container to placeholder's parent. + + ie : Set loaded content as placeholder's sibling + + Args: + container (str): Placeholder loaded containers + """ + + if not container: + return + + # TODO: This currently returns only a single root but a loaded scene + # could technically load more than a single root + container_root = get_container_transforms(container, root=True) + + # Bugfix: The get_container_transforms does not recognize the load + # reference group currently + # TODO: Remove this when it does + parent = get_node_parent(container_root) + if parent: + container_root = parent + roots = [container_root] + + # Add the loaded roots to the holding sets if they exist + holding_sets = cmds.listSets(object=placeholder.scene_identifier) or [] + for holding_set in holding_sets: + cmds.sets(roots, forceElement=holding_set) + + # Parent the roots to the place of the placeholder locator and match + # its matrix + placeholder_form = cmds.xform( + placeholder.scene_identifier, + query=True, + matrix=True, + worldSpace=True + ) + scene_parent = get_node_parent(placeholder.scene_identifier) + for node in set(roots): + cmds.xform(node, matrix=placeholder_form, worldSpace=True) + + if scene_parent != get_node_parent(node): + if scene_parent: + node = cmds.parent(node, scene_parent)[0] + else: + node = cmds.parent(node, world=True)[0] + + # Move loaded nodes in index order next to their placeholder node + cmds.reorder(node, back=True) + index = get_node_index_under_parent(placeholder.scene_identifier) + cmds.reorder(node, front=True) + cmds.reorder(node, relative=index + 1) diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py new file mode 100644 index 0000000000..62e10ba023 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py @@ -0,0 +1,201 @@ +from maya import cmds + +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin +) +from ayon_core.lib import NumberDef, TextDef, EnumDef +from ayon_core.lib.events import weakref_partial + + +EXAMPLE_SCRIPT = """ +# Access maya commands +from maya import cmds + +# Access the placeholder node +placeholder_node = placeholder.scene_identifier + +# Access the event callback +if event is None: + print(f"Populating {placeholder}") +else: + if event.topic == "template.depth_processed": + print(f"Processed depth: {event.get('depth')}") + elif event.topic == "template.finished": + 
print("Build finished.") +""".strip() + + +class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin): + """Execute a script at the given `order` during workfile build. + + This is a very low-level placeholder to run Python scripts at a given + point in time during the workfile template build. + + It can create either a locator or an objectSet as placeholder node. + It defaults to an objectSet, since allowing to run on e.g. other + placeholder node members can be useful, e.g. using: + + >>> members = cmds.sets(placeholder.scene_identifier, query=True) + + """ + + identifier = "maya.runscript" + label = "Run Python Script" + + use_selection_as_parent = False + + def get_placeholder_options(self, options=None): + options = options or {} + return [ + NumberDef( + "order", + label="Order", + default=options.get("order") or 0, + decimals=0, + minimum=0, + maximum=999, + tooltip=( + "Order" + "\nOrder defines asset loading priority (0 to 999)" + "\nPriority rule is : \"lowest is first to load\"." + ) + ), + TextDef( + "prepare_script", + label="Run at\nprepare", + tooltip="Run before populate at prepare order", + multiline=True, + default=options.get("prepare_script", "") + ), + TextDef( + "populate_script", + label="Run at\npopulate", + tooltip="Run script at populate node order
" + "This is the default behavior", + multiline=True, + default=options.get("populate_script", EXAMPLE_SCRIPT) + ), + TextDef( + "depth_processed_script", + label="Run after\ndepth\niteration", + tooltip="Run script after every build depth iteration", + multiline=True, + default=options.get("depth_processed_script", "") + ), + TextDef( + "finished_script", + label="Run after\nbuild", + tooltip=( + "Run script at build finished.
" + "Note: this even runs if other placeholders had " + "errors during the build" + ), + multiline=True, + default=options.get("finished_script", "") + ), + EnumDef( + "create_nodetype", + label="Nodetype", + items={ + "spaceLocator": "Locator", + "objectSet": "ObjectSet" + }, + tooltip=( + "The placeholder's node type to be created.
" + "Note this only works on create, not on update" + ), + default=options.get("create_nodetype", "objectSet") + ), + ] + + def create_placeholder(self, placeholder_data): + nodetype = placeholder_data.get("create_nodetype", "objectSet") + + if nodetype == "spaceLocator": + super(MayaPlaceholderScriptPlugin, self).create_placeholder( + placeholder_data + ) + elif nodetype == "objectSet": + placeholder_data["plugin_identifier"] = self.identifier + + # Create maya objectSet on selection + selection = cmds.ls(selection=True, long=True) + name = self._create_placeholder_name(placeholder_data) + node = cmds.sets(selection, name=name) + + self.imprint(node, placeholder_data) + + def prepare_placeholders(self, placeholders): + super(MayaPlaceholderScriptPlugin, self).prepare_placeholders( + placeholders + ) + for placeholder in placeholders: + prepare_script = placeholder.data.get("prepare_script") + if not prepare_script: + continue + + self.run_script(placeholder, prepare_script) + + def populate_placeholder(self, placeholder): + + populate_script = placeholder.data.get("populate_script") + depth_script = placeholder.data.get("depth_processed_script") + finished_script = placeholder.data.get("finished_script") + + # Run now + if populate_script: + self.run_script(placeholder, populate_script) + + if not any([depth_script, finished_script]): + # No callback scripts to run + if not placeholder.data.get("keep_placeholder", True): + self.delete_placeholder(placeholder) + return + + # Run at each depth processed + if depth_script: + callback = weakref_partial( + self.run_script, placeholder, depth_script) + self.builder.add_on_depth_processed_callback( + callback, order=placeholder.order) + + # Run at build finish + if finished_script: + callback = weakref_partial( + self.run_script, placeholder, finished_script) + self.builder.add_on_finished_callback( + callback, order=placeholder.order) + + # If placeholder should be deleted, delete it after finish so + # the scripts have access to it up to the last run + if not placeholder.data.get("keep_placeholder", True): + delete_callback = weakref_partial( + self.delete_placeholder, placeholder) + self.builder.add_on_finished_callback( + delete_callback, order=placeholder.order + 1) + + def run_script(self, placeholder, script, event=None): + """Run script + + Even though `placeholder` is an unused arguments by exposing it as + an input argument it means it makes it available through + globals()/locals() in the `exec` call, giving the script access + to the placeholder. + + For example: + >>> node = placeholder.scene_identifier + + In the case the script is running at a callback level (not during + populate) then it has access to the `event` as well, otherwise the + value is None if it runs during `populate_placeholder` directly. 
+ + For example adding this as the callback script: + >>> if event is not None: + >>> if event.topic == "on_depth_processed": + >>> print(f"Processed depth: {event.get('depth')}") + >>> elif event.topic == "on_finished": + >>> print("Build finished.") + + """ + self.log.debug(f"Running script at event: {event}") + exec(script, locals()) diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py index 88ef4b201a..c1d9f019e4 100644 --- a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py @@ -7,7 +7,7 @@ from maya import cmds import ayon_api from ayon_core.pipeline import get_current_project_name -import ayon_core.hosts.maya.lib as maya_lib +import ayon_core.hosts.maya.api.lib as maya_lib from . import lib from .alembic import get_alembic_ids_cache diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py index 78cbe85097..0a4755c166 100644 --- a/client/ayon_core/hosts/nuke/api/lib.py +++ b/client/ayon_core/hosts/nuke/api/lib.py @@ -43,7 +43,9 @@ from ayon_core.pipeline import ( from ayon_core.pipeline.context_tools import ( get_current_context_custom_workfile_template ) -from ayon_core.pipeline.colorspace import get_imageio_config +from ayon_core.pipeline.colorspace import ( + get_current_context_imageio_config_preset +) from ayon_core.pipeline.workfile import BuildWorkfile from . import gizmo_menu from .constants import ASSIST @@ -1495,18 +1497,28 @@ class WorkfileSettings(object): filter_knobs = [ "viewerProcess", - "wipe_position" + "wipe_position", + "monitorOutOutputTransform" ] + display, viewer = get_viewer_config_from_string( + viewer_dict["viewerProcess"] + ) + viewer_process = create_viewer_profile_string( + viewer, display, path_like=False + ) + display, viewer = get_viewer_config_from_string( + viewer_dict["output_transform"] + ) + output_transform = create_viewer_profile_string( + viewer, display, path_like=False + ) erased_viewers = [] for v in nuke.allNodes(filter="Viewer"): # set viewProcess to preset from settings - v["viewerProcess"].setValue( - str(viewer_dict["viewerProcess"]) - ) + v["viewerProcess"].setValue(viewer_process) - if str(viewer_dict["viewerProcess"]) \ - not in v["viewerProcess"].value(): + if viewer_process not in v["viewerProcess"].value(): copy_inputs = v.dependencies() copy_knobs = {k: v[k].value() for k in v.knobs() if k not in filter_knobs} @@ -1524,11 +1536,11 @@ class WorkfileSettings(object): # set copied knobs for k, v in copy_knobs.items(): - print(k, v) nv[k].setValue(v) # set viewerProcess - nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"])) + nv["viewerProcess"].setValue(viewer_process) + nv["monitorOutOutputTransform"].setValue(output_transform) if erased_viewers: log.warning( @@ -1542,12 +1554,8 @@ class WorkfileSettings(object): imageio_host (dict): host colorspace configurations ''' - config_data = get_imageio_config( - project_name=get_current_project_name(), - host_name="nuke" - ) + config_data = get_current_context_imageio_config_preset() - viewer_process_settings = imageio_host["viewer"]["viewerProcess"] workfile_settings = imageio_host["workfile"] color_management = workfile_settings["color_management"] native_ocio_config = workfile_settings["native_ocio_config"] @@ -1574,29 +1582,6 @@ class WorkfileSettings(object): residual_path )) - # get monitor lut from settings respecting Nuke version differences - 
monitor_lut = workfile_settings["thumbnail_space"] - monitor_lut_data = self._get_monitor_settings( - viewer_process_settings, monitor_lut - ) - monitor_lut_data["workingSpaceLUT"] = ( - workfile_settings["working_space"] - ) - - # then set the rest - for knob, value_ in monitor_lut_data.items(): - # skip unfilled ocio config path - # it will be dict in value - if isinstance(value_, dict): - continue - # skip empty values - if not value_: - continue - if self._root_node[knob].value() not in value_: - self._root_node[knob].setValue(str(value_)) - log.debug("nuke.root()['{}'] changed to: {}".format( - knob, value_)) - # set ocio config path if config_data: config_path = config_data["path"].replace("\\", "/") @@ -1611,6 +1596,31 @@ class WorkfileSettings(object): if correct_settings: self._set_ocio_config_path_to_workfile(config_data) + # get monitor lut from settings respecting Nuke version differences + monitor_lut_data = self._get_monitor_settings( + workfile_settings["monitor_out_lut"], + workfile_settings["monitor_lut"] + ) + monitor_lut_data.update({ + "workingSpaceLUT": workfile_settings["working_space"], + "int8Lut": workfile_settings["int_8_lut"], + "int16Lut": workfile_settings["int_16_lut"], + "logLut": workfile_settings["log_lut"], + "floatLut": workfile_settings["float_lut"] + }) + + # then set the rest + for knob, value_ in monitor_lut_data.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value_, dict): + continue + # skip empty values + if not value_: + continue + self._root_node[knob].setValue(str(value_)) + log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_)) + def _get_monitor_settings(self, viewer_lut, monitor_lut): """ Get monitor settings from viewer and monitor lut diff --git a/client/ayon_core/hosts/nuke/api/pipeline.py b/client/ayon_core/hosts/nuke/api/pipeline.py index 0d44aba2f9..d35a2e89e0 100644 --- a/client/ayon_core/hosts/nuke/api/pipeline.py +++ b/client/ayon_core/hosts/nuke/api/pipeline.py @@ -18,6 +18,7 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, register_inventory_action_path, + register_workfile_build_plugin_path, AYON_INSTANCE_ID, AVALON_INSTANCE_ID, AVALON_CONTAINER_ID, @@ -52,8 +53,6 @@ from .lib import ( MENU_LABEL, ) from .workfile_template_builder import ( - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin, build_workfile_template, create_placeholder, update_placeholder, @@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") # registering pyblish gui regarding settings in presets if os.getenv("PYBLISH_GUI", None): @@ -105,18 +105,11 @@ class NukeHost( def get_workfile_extensions(self): return file_extensions() - def get_workfile_build_placeholder_plugins(self): - return [ - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin - ] - def get_containers(self): return ls() def install(self): - ''' Installing all requarements for Nuke host - ''' + """Installing all requirements for Nuke host""" pyblish.api.register_host("nuke") @@ -125,6 +118,7 @@ class NukeHost( register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) # Register AYON event for workfiles loading. 
register_event_callback("workio.open_file", check_inventory_versions) @@ -178,7 +172,6 @@ def add_nuke_callbacks(): # set apply all workfile settings on script load and save nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) - if nuke_settings["dirmap"]["enabled"]: log.info("Added Nuke's dir-mapping callback ...") # Add dirmap for file paths. diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py index 5b97fab0c2..ec13104d4d 100644 --- a/client/ayon_core/hosts/nuke/api/plugin.py +++ b/client/ayon_core/hosts/nuke/api/plugin.py @@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview): # deal with now lut defined in viewer lut self.viewer_lut_raw = klass.viewer_lut_raw self.write_colorspace = instance.data["colorspace"] + self.color_channels = instance.data["color_channels"] self.name = name or "baked" self.ext = ext or "mov" @@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview): self.log.info("Nodes exported...") return path - def generate_mov(self, farm=False, **kwargs): + def generate_mov(self, farm=False, delete=True, **kwargs): # colorspace data colorspace = None # get colorspace settings @@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview): self.log.debug("Path: {}".format(self.path)) write_node["file"].setValue(str(self.path)) write_node["file_type"].setValue(str(self.ext)) + write_node["channels"].setValue(str(self.color_channels)) + # Knobs `meta_codec` and `mov64_codec` are not available on centos. # TODO shouldn't this come from settings on outputs? try: @@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview): self.render(write_node.name()) # ---------- generate representation data + tags = ["review", "need_thumbnail"] + + if delete: + tags.append("delete") + self.get_representation_data( - tags=["review", "need_thumbnail", "delete"] + add_tags, + tags=tags + add_tags, custom_tags=add_custom_tags, range=True, colorspace=colorspace @@ -1151,7 +1159,6 @@ def _remove_old_knobs(node): "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" ] - print(node.name()) # remove all old knobs for knob in node.allKnobs(): diff --git a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py index 495edd9e5f..aebf91c4a4 100644 --- a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py @@ -1,30 +1,17 @@ import collections import nuke + from ayon_core.pipeline import registered_host from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin, ) from ayon_core.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) from .lib import ( - find_free_space_to_paste_nodes, - get_extreme_positions, - get_group_io_nodes, imprint, - refresh_node, - refresh_nodes, reset_selection, - get_names_from_nodes, - get_nodes_by_names, - select_nodes, - duplicate_node, - node_tempfile, get_main_window, WorkfileSettings, ) @@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder): return True + class NukePlaceholderPlugin(PlaceholderPlugin): node_color = 4278190335 @@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin): nuke.delete(placeholder_node) -class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): - identifier = 
"nuke.load" - label = "Nuke load" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderLoadPlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" - ) - if loaded_representation_ids is None: - loaded_representation_ids = set() - for node in nuke.allNodes(): - if "repre_id" in node.knobs(): - loaded_representation_ids.add( - node.knob("repre_id").getValue() - ) - - self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids - ) - return loaded_representation_ids - - def _before_placeholder_load(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def _before_repre_load(self, placeholder, representation): - placeholder.data["last_repre_id"] = representation["id"] - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - # TODO do data validations and maybe updgrades if are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. - """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - # TODO get from shared populate data! 
- nodes_init = placeholder.data["nodes_init"] - nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Loaded nodes: {}".format(nodes_loaded)) - if not nodes_loaded: - return - - placeholder.data["delete"] = True - - nodes_loaded = self._move_to_placeholder_group( - placeholder, nodes_loaded - ) - placeholder.data["last_loaded"] = nodes_loaded - refresh_nodes(nodes_loaded) - - # positioning of the loaded nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_loaded) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_loaded) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of loaded nodes - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) - - self._set_loaded_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_loaded) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_loaded, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the loaded - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_loaded, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_loaded): - """ - opening the placeholder's group and copying loaded nodes in it. 
- - Returns : - nodes_loaded (list): the new list of pasted nodes - """ - - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_loaded) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_loaded = nuke.selectedNodes() - return nodes_loaded - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is loaded.""" - - nodes_loaded = placeholder.data["last_loaded"] - loaded_backdrops = [] - bd_orders = set() - for node in nodes_loaded: - if isinstance(node, nuke.BackdropNode): - loaded_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in loaded_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes loaded with it) - - add Id to the attributes of all the other nodes - """ - - loaded_nodes = placeholder.data["last_loaded"] - loaded_nodes_set = set(loaded_nodes) - data = {"repre_id": str(placeholder.data["last_repre_id"])} - - for node in loaded_nodes: - node_knobs = node.knobs() - if "builder_type" not in node_knobs: - # save the id of representation for all imported nodes - imprint(node, data) - node.knob("repre_id").setVisible(False) - refresh_node(node) - continue - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(loaded_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_loaded_connections(self, placeholder): - """ - set inputs and outputs of loaded nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - loaded with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - -class NukePlaceholderCreatePlugin( - NukePlaceholderPlugin, PlaceholderCreateMixin -): - identifier = "nuke.create" - label = "Nuke create" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderCreatePlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _before_instance_create(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - - output.append( - CreatePlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. 
- """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - nodes_init = placeholder.data["nodes_init"] - nodes_created = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Created nodes: {}".format(nodes_created)) - if not nodes_created: - return - - placeholder.data["delete"] = True - - nodes_created = self._move_to_placeholder_group( - placeholder, nodes_created - ) - placeholder.data["last_created"] = nodes_created - refresh_nodes(nodes_created) - - # positioning of the created nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_created) - for node in nodes_created: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_created) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of created nodes - - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_created) - - self._set_created_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new created nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_created) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_created, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the created - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_created, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_created: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_created): - """ - opening the placeholder's group and copying created nodes in it. 
- - Returns : - nodes_created (list): the new list of pasted nodes - """ - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_created) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_created = nuke.selectedNodes() - return nodes_created - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is create.""" - - nodes_created = placeholder.data["last_created"] - created_backdrops = [] - bd_orders = set() - for node in nodes_created: - if isinstance(node, nuke.BackdropNode): - created_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in created_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes created with it) - - add Id to the attributes of all the other nodes - """ - - created_nodes = placeholder.data["last_created"] - created_nodes_set = set(created_nodes) - - for node in created_nodes: - node_knobs = node.knobs() - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(created_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_created_connections(self, placeholder): - """ - set inputs and outputs of created nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_created"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - created with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_created"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - def build_workfile_template(*args, **kwargs): builder = NukeTemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py index 7d823919dc..50af8a4eb9 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py @@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin): } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin): "colorspaceInput": colorspace, } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # adding nodes to node graph diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py index 14c54c3adc..3c7d4f3bb2 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py @@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin): "frameEnd": last, "version": version_entity["version"], } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin): } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py index df8f2ab018..7fa90da86f 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py @@ -9,7 +9,8 @@ from ayon_core.pipeline import ( get_representation_path, ) from ayon_core.pipeline.colorspace import ( - get_imageio_file_rules_colorspace_from_filepath + get_imageio_file_rules_colorspace_from_filepath, + get_current_context_imageio_config_preset, ) from ayon_core.hosts.nuke.api.lib import ( get_imageio_input_colorspace, @@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader): "frameStart", "frameEnd", "source", - "author", "fps", "handleStart", "handleEnd", @@ -347,8 +347,7 @@ class 
LoadClip(plugin.NukeLoader): "source": version_attributes.get("source"), "handleStart": str(self.handle_start), "handleEnd": str(self.handle_end), - "fps": str(version_attributes.get("fps")), - "author": version_attributes.get("author") + "fps": str(version_attributes.get("fps")) } last_version_entity = ayon_api.get_last_version_by_product_id( @@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader): f"Colorspace from representation colorspaceData: {colorspace}" ) + config_data = get_current_context_imageio_config_preset() # check if any filerules are not applicable new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath( # noqa - filepath, "nuke", project_name + filepath, "nuke", project_name, config_data=config_data ) self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}") diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py index a87c81295a..be7420fcf0 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py @@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps", ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py index 8fa1347598..9bb430b37b 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py @@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py index 95f85bacfc..57d00795ae 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py @@ -71,7 +71,6 @@ class LoadGizmo(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -139,7 +138,6 @@ class LoadGizmo(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py index 3112e27811..ed2b1ec458 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -73,7 +73,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -145,7 +144,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_image.py b/client/ayon_core/hosts/nuke/plugins/load/load_image.py index d825b621fc..b5fccd8a0d 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_image.py +++ 
b/client/ayon_core/hosts/nuke/plugins/load/load_image.py @@ -133,7 +133,7 @@ class LoadImage(load.LoaderPlugin): "version": version_entity["version"], "colorspace": colorspace, } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes.get(k, str(None)) r["tile_color"].setValue(int("0x4ecd25ff", 16)) @@ -207,7 +207,6 @@ class LoadImage(load.LoaderPlugin): "colorspace": version_attributes.get("colorSpace"), "source": version_attributes.get("source"), "fps": str(version_attributes.get("fps")), - "author": version_attributes.get("author") } # change color of node diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_model.py b/client/ayon_core/hosts/nuke/plugins/load/load_model.py index 0326e0a4fc..40862cd1e0 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_model.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_model.py @@ -47,7 +47,7 @@ class AlembicModelLoader(load.LoaderPlugin): "version": version_entity["version"] } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -130,7 +130,7 @@ class AlembicModelLoader(load.LoaderPlugin): } # add additional metadata from the version to imprint to Avalon knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py index 3e554f9d3b..d6699be164 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py @@ -55,7 +55,6 @@ class LinkAsGroup(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -131,7 +130,6 @@ class LinkAsGroup(load.LoaderPlugin): "colorspace": version_attributes.get("colorSpace"), "source": version_attributes.get("source"), "fps": version_attributes.get("fps"), - "author": version_attributes.get("author") } # Update the imprinted representation diff --git a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py index 745351dc49..27525bcad1 100644 --- a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py @@ -153,6 +153,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin, # Determine defined file type ext = write_node["file_type"].value() + # determine defined channel type + color_channels = write_node["channels"].value() + # get frame range data handle_start = instance.context.data["handleStart"] handle_end = instance.context.data["handleEnd"] @@ -172,7 +175,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin, "path": write_file_path, "outputDir": output_dir, "ext": ext, - "colorspace": colorspace + "colorspace": colorspace, + "color_channels": color_channels }) if product_type == "render": diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py index 8d7a3ec311..82c7b6e4c5 100644 --- a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py @@ -136,11 +136,16 @@ class 
ExtractReviewIntermediates(publish.Extractor): self, instance, o_name, o_data["extension"], multiple_presets) + o_data["add_custom_tags"].append("intermediate") + delete = not o_data.get("publish", False) + if instance.data.get("farm"): if "review" in instance.data["families"]: instance.data["families"].remove("review") - data = exporter.generate_mov(farm=True, **o_data) + data = exporter.generate_mov( + farm=True, delete=delete, **o_data + ) self.log.debug( "_ data: {}".format(data)) @@ -154,7 +159,7 @@ class ExtractReviewIntermediates(publish.Extractor): "bakeWriteNodeName": data.get("bakeWriteNodeName") }) else: - data = exporter.generate_mov(**o_data) + data = exporter.generate_mov(delete=delete, **o_data) # add representation generated by exporter generated_repres.extend(data["representations"]) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..a5490021e4 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,428 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderCreatePlugin( + NukePlaceholderPlugin, PlaceholderCreateMixin +): + identifier = "nuke.create" + label = "Nuke create" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderCreatePlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _before_instance_create(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + plugin_identifier_knob = node.knob("plugin_identifier") + if ( + plugin_identifier_knob is None + or plugin_identifier_knob.getValue() != self.identifier + ): + continue + + placeholder_data = self._parse_placeholder_node_data(node) + + output.append( + CreatePlaceholderItem(node_name, placeholder_data, self) + ) + + return output + + def populate_placeholder(self, placeholder): + self.populate_create_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + self.populate_create_placeholder(placeholder) + + def get_placeholder_options(self, options=None): + return self.get_create_plugin_options(options) + + def post_placeholder_process(self, placeholder, failed): + """Cleanup placeholder after load of its corresponding representations. 
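+
+        For this create variant the nodes in question are the ones
+        created via `populate_create_placeholder`, not loaded
+        representations.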
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to
+                create instances.
+            failed (bool): Creation of the instances failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Created nodes: {}".format(nodes_created))
+        if not nodes_created:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_created = self._move_to_placeholder_group(
+            placeholder, nodes_created
+        )
+        placeholder.data["last_created"] = nodes_created
+        refresh_nodes(nodes_created)
+
+        # positioning of the created nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_created)
+        for node in nodes_created:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_created)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of created nodes
+
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+            self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
+
+            self._set_created_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the new created nodes,
+            # set their inputs and outputs and update all nodes positions and
+            # dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copied nodes
+            self._update_nodes(placeholder, new_nodes, nodes_created)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_created,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the created
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_created, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_created:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_created):
+        """Open the placeholder's group and copy created nodes into it.
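+
+        Args:
+            placeholder (PlaceholderItem): Placeholder whose group, if
+                any, should receive the created nodes.
+            nodes_created (list): Nodes created for the placeholder.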
+
+        Returns:
+            nodes_created (list): The new list of pasted nodes.
+        """
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_created)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_created = nuke.selectedNodes()
+        return nodes_created
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is created."""
+
+        nodes_created = placeholder.data["last_created"]
+        created_backdrops = []
+        bd_orders = set()
+        for node in nodes_created:
+            if isinstance(node, nuke.BackdropNode):
+                created_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in created_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """Add siblings names to placeholder attributes.
+
+        Siblings are the nodes that were created along with the
+        placeholder.
+        """
+
+        created_nodes = placeholder.data["last_created"]
+        created_nodes_set = set(created_nodes)
+
+        for node in created_nodes:
+            node_knobs = node.knobs()
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(created_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes."""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        The sizes and positions of `considered_nodes` drive the
+        adjustments.
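+
+        When `offset_y` is None the placeholder node's own position and
+        screen size drive the computed offsets; otherwise the siblings
+        imprinted on the placeholder do.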
+
+        Args:
+            nodes (list): List of nodes to update.
+            considered_nodes (list): List of nodes to consider while
+                updating positions and dimensions.
+            offset_y (int, optional): Distance between copies.
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_created_connections(self, placeholder):
+        """Set inputs and outputs of created nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_created"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the ones that were
+        created with it) for the new nodes added.
+
+        Returns:
+            copies (dict): Copied nodes names mapped to their copies.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+            refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
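+
+        The original siblings' connections are mirrored onto their
+        copies, and the newly created nodes are spliced into the chain
+        through their group input/output nodes.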
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_created"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..258f48c9d3 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,455 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): + identifier = "nuke.load" + label = "Nuke load" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderLoadPlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _get_loaded_repre_ids(self): + loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + loaded_representation_ids = set() + for node in nuke.allNodes(): + if "repre_id" in node.knobs(): + loaded_representation_ids.add( + node.knob("repre_id").getValue() + ) + + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def _before_placeholder_load(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def _before_repre_load(self, placeholder, representation): + placeholder.data["last_repre_id"] = representation["id"] + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + 
plugin_identifier_knob = node.knob("plugin_identifier")
+            if (
+                plugin_identifier_knob is None
+                or plugin_identifier_knob.getValue() != self.identifier
+            ):
+                continue
+
+            placeholder_data = self._parse_placeholder_node_data(node)
+            # TODO do data validations and maybe upgrades if they are invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
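+
+    # Overview (illustrative, not an exhaustive contract) of the keys this
+    # plugin keeps on 'placeholder.data' while populating:
+    #   "nodes_init"  - nodes that existed before the representation loaded
+    #   "last_loaded" - nodes created by the last load
+    #   "siblings"    - names of nodes loaded together with the placeholder
+    #   "nb_children" - how many times the placeholder was populated
+    #   "delete"      - whether the placeholder node should be removed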
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # get the most recently added nodes
+        # TODO get from shared populate data!
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Loaded nodes: {}".format(nodes_loaded))
+        if not nodes_loaded:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_loaded = self._move_to_placeholder_group(
+            placeholder, nodes_loaded
+        )
+        placeholder.data["last_loaded"] = nodes_loaded
+        refresh_nodes(nodes_loaded)
+
+        # position the loaded nodes around the placeholder
+        min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
+        for node in nodes_loaded:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_loaded)
+
+        # fix the z_order of loaded backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of loaded nodes
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
+
+            self._set_loaded_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the newly loaded
+            # nodes, set their inputs and outputs and update all nodes
+            # positions, dimensions and sibling names
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copied nodes
+            self._update_nodes(placeholder, new_nodes, nodes_loaded)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_loaded,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # without siblings the loaded nodes are placed in a free space
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_loaded, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_loaded:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_loaded):
+        """Open the placeholder's group and paste the loaded nodes into it.
+
+        Returns:
+            list: The newly pasted nodes.
+        """
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_loaded)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_loaded = nuke.selectedNodes()
+        return nodes_loaded
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is loaded."""
+
+        nodes_loaded = placeholder.data["last_loaded"]
+        loaded_backdrops = []
+        bd_orders = set()
+        for node in nodes_loaded:
+            if isinstance(node, nuke.BackdropNode):
+                loaded_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in loaded_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """Imprint sibling information on the loaded nodes.
+
+        - Add sibling names to the attributes of placeholder nodes
+          (nodes loaded with it).
+        - Add the representation id to the attributes of all other nodes.
+        """
+
+        loaded_nodes = placeholder.data["last_loaded"]
+        loaded_nodes_set = set(loaded_nodes)
+        data = {"repre_id": str(placeholder.data["last_repre_id"])}
+
+        for node in loaded_nodes:
+            node_knobs = node.knobs()
+            if "builder_type" not in node_knobs:
+                # save the representation id for all imported nodes
+                imprint(node, data)
+                node.knob("repre_id").setVisible(False)
+                refresh_node(node)
+                continue
+
+            if (
+                "is_placeholder" not in node_knobs
+                or node.knob("is_placeholder").value()
+            ):
+                siblings = list(loaded_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the node attributes."""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Dimensions and positions are adjusted by the size of the
+        considered nodes.
+
+        Args:
+            placeholder (PlaceholderItem): The processed placeholder item.
+            nodes (list): Nodes to update.
+            considered_nodes (list): Nodes to consider while updating
+                positions and dimensions.
+            offset_y (Optional[int]): Vertical distance between copies.
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            sib_min_x, _, sib_max_x, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - sib_max_x + sib_min_x)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or not set(contained_nodes) <= set(node.getNodes())
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_loaded_connections(self, placeholder):
+        """Set inputs and outputs of the loaded nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_loaded"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings.
+
+        Copies are made of the nodes that were loaded with the placeholder,
+        for the newly added nodes.
+
+        Returns:
+            dict: Copied nodes by the original node names.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_loaded"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py index 26f2469844..a44c3490c6 100644 --- a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py @@ -35,8 +35,12 @@ class ImageCreator(Creator): create_empty_group = False stub = api.stub() # only after PS is up - top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): + try: + top_level_selected_items = stub.get_selected_layers() + except ValueError: + raise CreatorError("Cannot group locked Background layer!") + only_single_item_selected = len(top_level_selected_items) == 1 if ( only_single_item_selected or @@ -50,11 +54,12 @@ class ImageCreator(Creator): group = stub.group_selected_layers(product_name_from_ui) groups_to_create.append(group) else: - stub.select_layers(stub.get_layers()) try: + stub.select_layers(stub.get_layers()) group = stub.group_selected_layers(product_name_from_ui) - except: + except ValueError: raise CreatorError("Cannot group locked Background layer!") + groups_to_create.append(group) # create empty group if nothing selected diff --git a/client/ayon_core/hosts/substancepainter/api/lib.py b/client/ayon_core/hosts/substancepainter/api/lib.py index 1cb480b552..64c39943ce 100644 --- a/client/ayon_core/hosts/substancepainter/api/lib.py +++ b/client/ayon_core/hosts/substancepainter/api/lib.py @@ -586,7 +586,6 @@ def prompt_new_file_with_mesh(mesh_filepath): # TODO: find a way to improve the process event to # load more complicated mesh app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000) - file_dialog.done(file_dialog.Accepted) app.processEvents(QtCore.QEventLoop.AllEvents) @@ -606,7 +605,7 @@ def prompt_new_file_with_mesh(mesh_filepath): mesh_select.setVisible(False) # Ensure UI is visually up-to-date - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000) # Trigger the 'select file' dialog to set the path and have the # new file dialog to use the path. 
@@ -623,8 +622,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
             "Failed to set mesh path with the prompt dialog:"
             f"{mesh_filepath}\n\n"
             "Creating new project directly with the mesh path instead.")
-    else:
-        dialog.done(dialog.Accepted)
 
     new_action = _get_new_project_action()
     if not new_action:
diff --git a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
index 01cb65dd5c..d5aac1191c 100644
--- a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
+++ b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
@@ -1,3 +1,5 @@
+import copy
+from qtpy import QtWidgets, QtCore
 from ayon_core.pipeline import (
     load,
     get_representation_path,
@@ -8,10 +10,133 @@ from ayon_core.hosts.substancepainter.api.pipeline import (
     set_container_metadata,
     remove_container_metadata
 )
-from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh
 
 import substance_painter.project
-import qargparse
+
+
+def _convert(substance_attr):
+    """Return Substance Painter Python API Project attribute from string.
+
+    This converts a string like "ProjectWorkflow.Default" to, for example,
+    the equivalent Substance Painter Python API object:
+    `substance_painter.project.ProjectWorkflow.Default`
+
+    Args:
+        substance_attr (str): The `substance_painter.project` attribute,
+            for example "ProjectWorkflow.Default"
+
+    Returns:
+        Any: Substance Python API object of the project attribute.
+
+    Raises:
+        ValueError: If the attribute does not exist on the
+            `substance_painter.project` python api.
+    """
+    root = substance_painter.project
+    for attr in substance_attr.split("."):
+        root = getattr(root, attr, None)
+        if root is None:
+            raise ValueError(
+                "Substance Painter project attribute"
+                f" does not exist: {substance_attr}")
+
+    return root
+
+
+def get_template_by_name(name: str, templates: list[dict]) -> dict:
+    return next(
+        template for template in templates
+        if template["name"] == name
+    )
+
+
+class SubstanceProjectConfigurationWindow(QtWidgets.QDialog):
+    """Pop-up dialog for choosing a project configuration template.
+
+    Lets the user pick one of the project templates defined in settings and
+    tweak its options (import cameras, preserve strokes) before a project
+    is created or its mesh reloaded.
+ """ + def __init__(self, project_templates): + super(SubstanceProjectConfigurationWindow, self).__init__() + self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) + + self.configuration = None + self.template_names = [template["name"] for template + in project_templates] + self.project_templates = project_templates + + self.widgets = { + "label": QtWidgets.QLabel( + "Select your template for project configuration"), + "template_options": QtWidgets.QComboBox(), + "import_cameras": QtWidgets.QCheckBox("Import Cameras"), + "preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"), + "clickbox": QtWidgets.QWidget(), + "combobox": QtWidgets.QWidget(), + "buttons": QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok + | QtWidgets.QDialogButtonBox.Cancel) + } + + self.widgets["template_options"].addItems(self.template_names) + + template_name = self.widgets["template_options"].currentText() + self._update_to_match_template(template_name) + # Build clickboxes + layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"]) + layout.addWidget(self.widgets["import_cameras"]) + layout.addWidget(self.widgets["preserve_strokes"]) + # Build combobox + layout = QtWidgets.QHBoxLayout(self.widgets["combobox"]) + layout.addWidget(self.widgets["template_options"]) + # Build buttons + layout = QtWidgets.QHBoxLayout(self.widgets["buttons"]) + # Build layout. + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(self.widgets["label"]) + layout.addWidget(self.widgets["combobox"]) + layout.addWidget(self.widgets["clickbox"]) + layout.addWidget(self.widgets["buttons"]) + + self.widgets["template_options"].currentTextChanged.connect( + self._update_to_match_template) + self.widgets["buttons"].accepted.connect(self.on_accept) + self.widgets["buttons"].rejected.connect(self.on_reject) + + def on_accept(self): + self.configuration = self.get_project_configuration() + self.close() + + def on_reject(self): + self.close() + + def _update_to_match_template(self, template_name): + template = get_template_by_name(template_name, self.project_templates) + self.widgets["import_cameras"].setChecked(template["import_cameras"]) + self.widgets["preserve_strokes"].setChecked( + template["preserve_strokes"]) + + def get_project_configuration(self): + templates = self.project_templates + template_name = self.widgets["template_options"].currentText() + template = get_template_by_name(template_name, templates) + template = copy.deepcopy(template) # do not edit the original + template["import_cameras"] = self.widgets["import_cameras"].isChecked() + template["preserve_strokes"] = ( + self.widgets["preserve_strokes"].isChecked() + ) + for key in ["normal_map_format", + "project_workflow", + "tangent_space_mode"]: + template[key] = _convert(template[key]) + return template + + @classmethod + def prompt(cls, templates): + dialog = cls(templates) + dialog.exec_() + configuration = dialog.configuration + dialog.deleteLater() + return configuration class SubstanceLoadProjectMesh(load.LoaderPlugin): @@ -25,48 +150,35 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin): icon = "code-fork" color = "orange" - options = [ - qargparse.Boolean( - "preserve_strokes", - default=True, - help="Preserve strokes positions on mesh.\n" - "(only relevant when loading into existing project)" - ), - qargparse.Boolean( - "import_cameras", - default=True, - help="Import cameras from the mesh file." 
-        )
-    ]
+    # Defined via settings
+    project_templates = []
 
-    def load(self, context, name, namespace, data):
+    def load(self, context, name, namespace, options=None):
 
         # Get user inputs
-        import_cameras = data.get("import_cameras", True)
-        preserve_strokes = data.get("preserve_strokes", True)
-        sp_settings = substance_painter.project.Settings(
-            import_cameras=import_cameras
-        )
+        result = SubstanceProjectConfigurationWindow.prompt(
+            self.project_templates)
+        if not result:
+            # user cancelled the loader action
+            return
 
         if not substance_painter.project.is_open():
             # Allow to 'initialize' a new project
             path = self.filepath_from_context(context)
-            # TODO: improve the prompt dialog function to not
-            # only works for simple polygon scene
-            result = prompt_new_file_with_mesh(mesh_filepath=path)
-            if not result:
-                self.log.info("User cancelled new project prompt."
-                              "Creating new project directly from"
-                              " Substance Painter API Instead.")
 
-            settings = substance_painter.project.create(
-                mesh_file_path=path, settings=sp_settings
-            )
-
+            sp_settings = substance_painter.project.Settings(
+                import_cameras=result["import_cameras"],
+                normal_map_format=result["normal_map_format"],
+                project_workflow=result["project_workflow"],
+                tangent_space_mode=result["tangent_space_mode"],
+                default_texture_resolution=result["default_texture_resolution"]
+            )
+            settings = substance_painter.project.create(
+                mesh_file_path=path, settings=sp_settings
+            )
         else:
             # Reload the mesh
             settings = substance_painter.project.MeshReloadingSettings(
-                import_cameras=import_cameras,
-                preserve_strokes=preserve_strokes
-            )
+                import_cameras=result["import_cameras"],
+                preserve_strokes=result["preserve_strokes"])
 
         def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus):  # noqa
             if status == substance_painter.project.ReloadMeshStatus.SUCCESS:  # noqa
@@ -92,7 +204,7 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
         # from the user's original choice. We don't store 'preserve_strokes'
         # as we always preserve strokes on updates.
         container["options"] = {
-            "import_cameras": import_cameras,
+            "import_cameras": result["import_cameras"],
         }
 
         set_container_metadata(project_mesh_object_name, container)
diff --git a/client/ayon_core/hosts/traypublisher/addon.py b/client/ayon_core/hosts/traypublisher/addon.py
index 70bdfe9a64..3dd275f223 100644
--- a/client/ayon_core/hosts/traypublisher/addon.py
+++ b/client/ayon_core/hosts/traypublisher/addon.py
@@ -1,5 +1,6 @@
 import os
 
+from pathlib import Path
 from ayon_core.lib import get_ayon_launcher_args
 from ayon_core.lib.execute import run_detached_process
 from ayon_core.addon import (
@@ -57,3 +58,62 @@ def launch():
     from ayon_core.tools import traypublisher
 
     traypublisher.main()
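+
+
+# Illustrative invocation of the 'ingestcsv' command defined below; the
+# exact executable and global arguments depend on the AYON deployment:
+#
+#   ayon addon traypublisher ingestcsv \
+#       --filepath /path/to/ingest.csv \
+#       --project MyProject \
+#       --folder-path /shots/sh010 \
+#       --task ingest \
+#       --ignore-validators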
+@cli_main.command()
+@click_wrap.option(
+    "--filepath",
+    help="Full path to CSV file with data",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--project",
+    help="Project name in which the context will be used",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--folder-path",
+    help="Folder path in which the context will be used",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--task",
+    help="Task name under the folder in which the context will be used",
+    type=str,
+    required=False
+)
+@click_wrap.option(
+    "--ignore-validators",
+    help="Option to ignore validators",
+    type=bool,
+    is_flag=True,
+    required=False
+)
+def ingestcsv(
+    filepath,
+    project,
+    folder_path,
+    task,
+    ignore_validators
+):
+    """Ingest CSV file into project.
+
+    This command will ingest a CSV file into the project. The CSV file
+    must be in a specific format. See the documentation for more
+    information.
+    """
+    from .csv_publish import csvpublish
+
+    # use Path to check if csv_filepath exists
+    if not Path(filepath).exists():
+        raise FileNotFoundError(f"File {filepath} does not exist.")
+
+    csvpublish(
+        filepath,
+        project,
+        folder_path,
+        task,
+        ignore_validators
+    )
diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py
new file mode 100644
index 0000000000..2762172936
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/csv_publish.py
@@ -0,0 +1,84 @@
+import pyblish.api
+import pyblish.util
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.lib.attribute_definitions import FileDefItem
+from ayon_core.pipeline import install_host
+from ayon_core.pipeline.create import CreateContext
+
+from ayon_core.hosts.traypublisher.api import TrayPublisherHost
+
+
+def csvpublish(
+    filepath,
+    project_name,
+    folder_path,
+    task_name=None,
+    ignore_validators=False
+):
+    """Publish CSV file.
+
+    Args:
+        filepath (str): Path to CSV file.
+        project_name (str): Project name.
+        folder_path (str): Folder path.
+        task_name (Optional[str]): Task name.
+        ignore_validators (Optional[bool]): Option to ignore validators.
+    """
+
+    # initialization of host
+    host = TrayPublisherHost()
+    install_host(host)
+
+    # set host context to the project
+    host.set_project_name(project_name)
+
+    # form precreate data with field values
+    file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
+    precreate_data = {
+        "csv_filepath_data": file_field,
+    }
+
+    # create context initialization
+    create_context = CreateContext(host, headless=True)
+    folder_entity = get_folder_by_path(
+        project_name,
+        folder_path=folder_path,
+    )
+
+    if not folder_entity:
+        raise ValueError(
+            f"Folder path '{folder_path}' does not"
+            f" exist in project '{project_name}'."
+        )
+
+    task_entity = None
+    if task_name:
+        task_entity = get_task_by_name(
+            project_name,
+            folder_entity["id"],
+            task_name,
+        )
+
+        if not task_entity:
+            raise ValueError(
+                f"Task name '{task_name}' does not"
+                f" exist on folder '{folder_path}'."
+            )
+
+    create_context.create(
+        "io.ayon.creators.traypublisher.csv_ingest",
+        "Main",
+        folder_entity=folder_entity,
+        task_entity=task_entity,
+        pre_create_data=precreate_data,
+    )
+
+    # publishing context initialization
+    pyblish_context = pyblish.api.Context()
+    pyblish_context.data["create_context"] = create_context
+
+    # redefine targets (skip 'local' to disable validators)
+    targets = None
+    if ignore_validators:
+        targets = ["default", "ingest"]
+
+    # publishing
+    pyblish.util.publish(context=pyblish_context, targets=targets)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
index 4d865c1c5c..da05afe86b 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py
@@ -156,14 +156,9 @@ This creator publishes color space look file (LUT).
]
 
     def apply_settings(self, project_settings):
-        host = self.create_context.host
-        host_name = host.name
-        project_name = host.get_current_project_name()
-        config_data = colorspace.get_imageio_config(
-            project_name, host_name,
+        config_data = colorspace.get_current_context_imageio_config_preset(
             project_settings=project_settings
         )
-
         if not config_data:
             self.enabled = False
             return
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
new file mode 100644
index 0000000000..8143e8b45b
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
@@ -0,0 +1,741 @@
+import os
+import re
+import csv
+import clique
+from io import StringIO
+from copy import deepcopy, copy
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.pipeline.create import (
+    get_product_name,
+    CreatorError,
+)
+from ayon_core.lib import FileDef, BoolDef
+from ayon_core.lib.transcoding import (
+    VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
+)
+from ayon_core.hosts.traypublisher.api.plugin import (
+    TrayPublishCreator
+)
+
+
+class IngestCSV(TrayPublishCreator):
+    """CSV ingest creator class."""
+
+    icon = "fa.file"
+
+    label = "CSV Ingest"
+    product_type = "csv_ingest_file"
+    identifier = "io.ayon.creators.traypublisher.csv_ingest"
+
+    default_variants = ["Main"]
+
+    description = "Ingest products' data from CSV file"
+    detailed_description = """
+Ingest products' data from CSV file following column and representation
+configuration in project settings.
+"""
+
+    # Position in the list of creators.
+    order = 10
+
+    # settings for this creator
+    columns_config = {}
+    representations_config = {}
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        """Create a product from each row found in the CSV file.
+
+        Args:
+            subset_name (str): The subset name.
+            instance_data (dict): The instance data.
+            pre_create_data (dict): The pre-create data.
+        """
+
+        csv_filepath_data = pre_create_data.get("csv_filepath_data", {})
+
+        folder = csv_filepath_data.get("directory", "")
+        if not os.path.exists(folder):
+            raise CreatorError(
+                f"Directory '{folder}' does not exist."
+            )
+        filenames = csv_filepath_data.get("filenames", [])
+        if not filenames:
+            raise CreatorError("CSV file was not provided.")
+        self._process_csv_file(
+            subset_name, instance_data, folder, filenames[0])
+
+    def _process_csv_file(
+        self, subset_name, instance_data, staging_dir, filename):
+        """Process CSV file.
+
+        Args:
+            subset_name (str): The subset name.
+            instance_data (dict): The instance data.
+            staging_dir (str): The staging directory.
+            filename (str): The filename.
+ """ + + # create new instance from the csv file via self function + self._pass_data_to_csv_instance( + instance_data, + staging_dir, + filename + ) + + csv_instance = CreatedInstance( + self.product_type, subset_name, instance_data, self + ) + self._store_new_instance(csv_instance) + + csv_instance["csvFileData"] = { + "filename": filename, + "staging_dir": staging_dir, + } + + # from special function get all data from csv file and convert them + # to new instances + csv_data_for_instances = self._get_data_from_csv( + staging_dir, filename) + + # create instances from csv data via self function + self._create_instances_from_csv_data( + csv_data_for_instances, staging_dir + ) + + def _create_instances_from_csv_data( + self, + csv_data_for_instances, + staging_dir + ): + """Create instances from csv data""" + + for folder_path, prepared_data in csv_data_for_instances.items(): + project_name = self.create_context.get_current_project_name() + products = prepared_data["products"] + + for instance_name, product_data in products.items(): + # get important instance variables + task_name = product_data["task_name"] + task_type = product_data["task_type"] + variant = product_data["variant"] + product_type = product_data["product_type"] + version = product_data["version"] + + # create subset/product name + product_name = get_product_name( + project_name, + task_name, + task_type, + self.host_name, + product_type, + variant + ) + + # make sure frame start/end is inherited from csv columns + # expected frame range data are handles excluded + for _, repre_data in product_data["representations"].items(): # noqa: E501 + frame_start = repre_data["frameStart"] + frame_end = repre_data["frameEnd"] + handle_start = repre_data["handleStart"] + handle_end = repre_data["handleEnd"] + fps = repre_data["fps"] + break + + # try to find any version comment in representation data + version_comment = next( + iter( + repre_data["comment"] + for repre_data in product_data["representations"].values() # noqa: E501 + if repre_data["comment"] + ), + None + ) + + # try to find any slate switch in representation data + slate_exists = any( + repre_data["slate"] + for _, repre_data in product_data["representations"].items() # noqa: E501 + ) + + # get representations from product data + representations = product_data["representations"] + label = f"{folder_path}_{product_name}_v{version:>03}" + + families = ["csv_ingest"] + if slate_exists: + # adding slate to families mainly for loaders to be able + # to filter out slates + families.append("slate") + + # make product data + product_data = { + "name": instance_name, + "folderPath": folder_path, + "families": families, + "label": label, + "task": task_name, + "variant": variant, + "source": "csv", + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "fps": fps, + "version": version, + "comment": version_comment, + } + + # create new instance + new_instance = CreatedInstance( + product_type, product_name, product_data, self + ) + self._store_new_instance(new_instance) + + if not new_instance.get("prepared_data_for_repres"): + new_instance["prepared_data_for_repres"] = [] + + base_thumbnail_repre_data = { + "name": "thumbnail", + "ext": None, + "files": None, + "stagingDir": None, + "stagingDir_persistent": True, + "tags": ["thumbnail", "delete"], + } + # need to populate all thumbnails for all representations + # so we can check if unique thumbnail per representation + # is needed + thumbnails = [ + 
repre_data["thumbnailPath"]
+                    for repre_data in representations.values()
+                    if repre_data["thumbnailPath"]
+                ]
+                multiple_thumbnails = len(set(thumbnails)) > 1
+                explicit_output_name = None
+                thumbnails_processed = False
+                for filepath, repre_data in representations.items():
+                    # check if any review derivative tag is present
+                    reviewable = any(
+                        tag for tag in repre_data.get("tags", [])
+                        # tag can be `ftrackreview` or `review`
+                        if "review" in tag
+                    )
+                    # Multiple unique thumbnails have to be published as
+                    # separate representations with an explicit outputName
+                    # so they can be paired with their reviewable video
+                    # representations (used by the Ftrack integrator).
+                    if (
+                        thumbnails
+                        and multiple_thumbnails
+                        and reviewable
+                    ):
+                        # multiple unique thumbnails per representation needs
+                        # grouping by outputName
+                        # mainly used in Ftrack instance integrator
+                        explicit_output_name = repre_data["representationName"]
+                        relative_thumbnail_path = repre_data["thumbnailPath"]
+                        # representation might not have thumbnail path
+                        # so ignore this one
+                        if not relative_thumbnail_path:
+                            continue
+                        thumb_dir, thumb_file = \
+                            self._get_refactor_thumbnail_path(
+                                staging_dir, relative_thumbnail_path)
+                        filename, ext = os.path.splitext(thumb_file)
+                        thumbnail_repr_data = deepcopy(
+                            base_thumbnail_repre_data)
+                        thumbnail_repr_data.update({
+                            "name": "thumbnail_{}".format(filename),
+                            "ext": ext[1:],
+                            "files": thumb_file,
+                            "stagingDir": thumb_dir,
+                            "outputName": explicit_output_name,
+                        })
+                        new_instance["prepared_data_for_repres"].append({
+                            "type": "thumbnail",
+                            "colorspace": None,
+                            "representation": thumbnail_repr_data,
+                        })
+                        # also add thumbnailPath for ayon to integrate
+                        if not new_instance.get("thumbnailPath"):
+                            new_instance["thumbnailPath"] = (
+                                os.path.join(thumb_dir, thumb_file)
+                            )
+                    elif (
+                        (
+                            thumbnails
+                            and not multiple_thumbnails
+                            and not thumbnails_processed
+                        )
+                        or not reviewable
+                    ):
+                        """
+                        Handle the case where there is only one thumbnail
+                        or the media is not reviewable. This needs to be
+                        processed only once per instance.
+ """ + if not thumbnails: + continue + # here we will use only one thumbnail for + # all representations + relative_thumbnail_path = repre_data["thumbnailPath"] + # popping last thumbnail from list since it is only one + # and we do not need to iterate again over it + if not relative_thumbnail_path: + relative_thumbnail_path = thumbnails.pop() + thumb_dir, thumb_file = \ + self._get_refactor_thumbnail_path( + staging_dir, relative_thumbnail_path) + _, ext = os.path.splitext(thumb_file) + thumbnail_repr_data = deepcopy( + base_thumbnail_repre_data) + thumbnail_repr_data.update({ + "ext": ext[1:], + "files": thumb_file, + "stagingDir": thumb_dir + }) + new_instance["prepared_data_for_repres"].append({ + "type": "thumbnail", + "colorspace": None, + "representation": thumbnail_repr_data, + }) + # also add thumbnailPath for ayon to integrate + if not new_instance.get("thumbnailPath"): + new_instance["thumbnailPath"] = ( + os.path.join(thumb_dir, thumb_file) + ) + + thumbnails_processed = True + + # get representation data + representation_data = self._get_representation_data( + filepath, repre_data, staging_dir, + explicit_output_name + ) + + new_instance["prepared_data_for_repres"].append({ + "type": "media", + "colorspace": repre_data["colorspace"], + "representation": representation_data, + }) + + def _get_refactor_thumbnail_path( + self, staging_dir, relative_thumbnail_path): + thumbnail_abs_path = os.path.join( + staging_dir, relative_thumbnail_path) + return os.path.split( + thumbnail_abs_path) + + def _get_representation_data( + self, filepath, repre_data, staging_dir, explicit_output_name=None + ): + """Get representation data + + Args: + filepath (str): Filepath to representation file. + repre_data (dict): Representation data from CSV file. + staging_dir (str): Staging directory. + explicit_output_name (Optional[str]): Explicit output name. + For grouping purposes with reviewable components. + Defaults to None. + """ + + # get extension of file + basename = os.path.basename(filepath) + extension = os.path.splitext(filepath)[-1].lower() + + # validate filepath is having correct extension based on output + repre_name = repre_data["representationName"] + repre_config_data = None + for repre in self.representations_config["representations"]: + if repre["name"] == repre_name: + repre_config_data = repre + break + + if not repre_config_data: + raise CreatorError( + f"Representation '{repre_name}' not found " + "in config representation data." + ) + + validate_extensions = repre_config_data["extensions"] + if extension not in validate_extensions: + raise CreatorError( + f"File extension '{extension}' not valid for " + f"output '{validate_extensions}'." + ) + + is_sequence = (extension in IMAGE_EXTENSIONS) + # convert ### string in file name to %03d + # this is for correct frame range validation + # example: file.###.exr -> file.%03d.exr + if "#" in basename: + padding = len(basename.split("#")) - 1 + basename = basename.replace("#" * padding, f"%0{padding}d") + is_sequence = True + + # make absolute path to file + absfilepath = os.path.normpath(os.path.join(staging_dir, filepath)) + dirname = os.path.dirname(absfilepath) + + # check if dirname exists + if not os.path.isdir(dirname): + raise CreatorError( + f"Directory '{dirname}' does not exist." 
+            )
+
+        # collect all files from dirname
+        paths_for_collection = []
+        for found_file in os.listdir(dirname):
+            paths_for_collection.append(os.path.join(dirname, found_file))
+
+        collections, _ = clique.assemble(paths_for_collection)
+
+        collection = None
+        if collections:
+            collection = collections[0]
+        elif is_sequence:
+            raise CreatorError(
+                f"No collections found in directory '{dirname}'."
+            )
+
+        frame_start = None
+        frame_end = None
+        if is_sequence:
+            files = [os.path.basename(path) for path in collection]
+            frame_start = list(collection.indexes)[0]
+            frame_end = list(collection.indexes)[-1]
+        else:
+            files = basename
+
+        tags = deepcopy(repre_data["tags"])
+        # tag the representation so later plugins know a slate is included
+        if repre_data["slate"]:
+            tags.append("has_slate")
+
+        # get representation data
+        representation_data = {
+            "name": repre_name,
+            "ext": extension[1:],
+            "files": files,
+            "stagingDir": dirname,
+            "stagingDir_persistent": True,
+            "tags": tags,
+        }
+        if extension in VIDEO_EXTENSIONS:
+            representation_data.update({
+                "fps": repre_data["fps"],
+                "outputName": repre_name,
+            })
+
+        if explicit_output_name:
+            representation_data["outputName"] = explicit_output_name
+
+        if frame_start:
+            representation_data["frameStart"] = frame_start
+        if frame_end:
+            representation_data["frameEnd"] = frame_end
+
+        return representation_data
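+
+    # Illustrative CSV row for the columns referenced below; the actual
+    # column set and names are driven by the column configuration in
+    # project settings:
+    #
+    #   Folder Path,Task Name,Version,Variant,Product Type,File Path,...
+    #   /shots/sh010,compositing,3,Main,plate,sh010_plate.####.exr,...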
+
+    def _get_data_from_csv(
+        self, package_dir, filename
+    ):
+        """Read the CSV file and prepare data for instance creation."""
+        # get current project name from context data
+        project_name = self.create_context.get_current_project_name()
+
+        csv_file_path = os.path.join(
+            package_dir, filename
+        )
+
+        # make sure csv file contains columns from following list
+        required_columns = [
+            column["name"] for column in self.columns_config["columns"]
+            if column["required_column"]
+        ]
+
+        # read csv file
+        with open(csv_file_path, "r") as csv_file:
+            csv_content = csv_file.read()
+
+        # read csv file with DictReader
+        csv_reader = csv.DictReader(
+            StringIO(csv_content),
+            delimiter=self.columns_config["csv_delimiter"]
+        )
+
+        # fix fieldnames
+        # sometimes someone can keep extra space at the start or end of
+        # the column name
+        all_columns = [
+            " ".join(column.rsplit()) for column in csv_reader.fieldnames]
+
+        # return back fixed fieldnames
+        csv_reader.fieldnames = all_columns
+
+        # check if csv file contains all required columns
+        if any(column not in all_columns for column in required_columns):
+            raise CreatorError(
+                f"Missing required columns: {required_columns}"
+            )
+
+        csv_data = {}
+        # get data from csv file
+        for row in csv_reader:
+            # Get required columns first
+            # TODO: will need to be folder path in CSV
+            # TODO: `context_asset_name` is now `folder_path`
+            folder_path = self._get_row_value_with_validation(
+                "Folder Path", row)
+            task_name = self._get_row_value_with_validation(
+                "Task Name", row)
+            version = self._get_row_value_with_validation(
+                "Version", row)
+
+            # Get optional columns
+            variant = self._get_row_value_with_validation(
+                "Variant", row)
+            product_type = self._get_row_value_with_validation(
+                "Product Type", row)
+
+            pre_product_name = (
+                f"{task_name}{variant}{product_type}"
+                f"{version}".replace(" ", "").lower()
+            )
+
+            # get representation data
+            filename, representation_data = \
+                self._get_representation_row_data(row)
+
+            # TODO: batch query of all folder paths and task names
+
+            # get folder entity from the folder path
+            folder_entity = get_folder_by_path(
+                project_name, folder_path)
+
+            # make sure the folder exists
+            if not folder_entity:
+                raise CreatorError(
+                    f"Folder '{folder_path}' was not found."
+                )
+
+            # make sure the task exists on the folder
+            task_entity = get_task_by_name(
+                project_name, folder_entity["id"], task_name)
+            if not task_entity:
+                raise CreatorError(
+                    f"Task '{task_name}' was not found"
+                    f" on folder '{folder_path}'."
+                )
+
+            # Store all CSV data in one dictionary keyed by folder path and
+            # product name; representations are grouped under products
+            # following their variants, and duplicate filenames are refused.
+            if folder_path not in csv_data:
+                csv_data[folder_path] = {
+                    "folder_entity": folder_entity,
+                    "products": {
+                        pre_product_name: {
+                            "task_name": task_name,
+                            "task_type": task_entity["taskType"],
+                            "variant": variant,
+                            "product_type": product_type,
+                            "version": version,
+                            "representations": {
+                                filename: representation_data,
+                            },
+                        }
+                    }
+                }
+            else:
+                csv_products = csv_data[folder_path]["products"]
+                if pre_product_name not in csv_products:
+                    csv_products[pre_product_name] = {
+                        "task_name": task_name,
+                        "task_type": task_entity["taskType"],
+                        "variant": variant,
+                        "product_type": product_type,
+                        "version": version,
+                        "representations": {
+                            filename: representation_data,
+                        },
+                    }
+                else:
+                    csv_representations = \
+                        csv_products[pre_product_name]["representations"]
+                    if filename in csv_representations:
+                        raise CreatorError(
+                            f"Duplicate filename '{filename}' in csv file."
+                        )
+                    csv_representations[filename] = representation_data
+
+        return csv_data
+
+    def _get_representation_row_data(self, row_data):
+        """Get representation data from a CSV row."""
+        # Get required columns first
+        file_path = self._get_row_value_with_validation(
+            "File Path", row_data)
+        frame_start = self._get_row_value_with_validation(
+            "Frame Start", row_data)
+        frame_end = self._get_row_value_with_validation(
+            "Frame End", row_data)
+        handle_start = self._get_row_value_with_validation(
+            "Handle Start", row_data)
+        handle_end = self._get_row_value_with_validation(
+            "Handle End", row_data)
+        fps = self._get_row_value_with_validation(
+            "FPS", row_data)
+
+        # Get optional columns
+        thumbnail_path = self._get_row_value_with_validation(
+            "Version Thumbnail", row_data)
+        colorspace = self._get_row_value_with_validation(
+            "Representation Colorspace", row_data)
+        comment = self._get_row_value_with_validation(
+            "Version Comment", row_data)
+        repre = self._get_row_value_with_validation(
+            "Representation", row_data)
+        slate_exists = self._get_row_value_with_validation(
+            "Slate Exists", row_data)
+        repre_tags = self._get_row_value_with_validation(
+            "Representation Tags", row_data)
+
+        # convert the tags value to a list
+        tags_list = copy(self.representations_config["default_tags"])
+        if repre_tags:
+            tags_list = []
+            tags_delimiter = self.representations_config["tags_delimiter"]
+            # strip spaces from tags and lower-case them
+            if tags_delimiter in repre_tags:
+                tags = repre_tags.split(tags_delimiter)
+                for _tag in tags:
+                    tags_list.append(_tag.strip().lower())
+            else:
+                tags_list.append(repre_tags.strip().lower())
+
+        representation_data = {
+            "colorspace": colorspace,
+            "comment": comment,
+            "representationName": repre,
+            "slate": slate_exists,
+            "tags": tags_list,
+            "thumbnailPath": thumbnail_path,
+            "frameStart": int(frame_start),
+            "frameEnd": int(frame_end),
+            "handleStart": int(handle_start),
+            "handleEnd": int(handle_end),
+            "fps": float(fps),
+        }
+        return file_path, representation_data
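+
+    # Illustrative shape of the settings this creator consumes (real values
+    # come from project settings; keys mirror how they are used above):
+    #
+    #   columns_config = {
+    #       "csv_delimiter": ",",
+    #       "columns": [
+    #           {
+    #               "name": "Folder Path",
+    #               "type": "text",
+    #               "default": "",
+    #               "required_column": True,
+    #               "validation_pattern": ".*",
+    #           },
+    #           ...
+    #       ],
+    #   }
+    #   representations_config = {
+    #       "representations": [
+    #           {"name": "exr_plate", "extensions": [".exr"]},
+    #           ...
+    #       ],
+    #       "default_tags": ["review"],
+    #       "tags_delimiter": ";",
+    #   }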
+ + def _get_row_value_with_validation( + self, column_name, row_data, default_value=None + ): + """Get row value with validation""" + + # get column data from column config + column_data = None + for column in self.columns_config["columns"]: + if column["name"] == column_name: + column_data = column + break + + if not column_data: + raise CreatorError( + f"Column '{column_name}' not found in column config." + ) + + # get column value from row + column_value = row_data.get(column_name) + column_required = column_data["required_column"] + + # check if column value is not empty string and column is required + if column_value == "" and column_required: + raise CreatorError( + f"Value in column '{column_name}' is required." + ) + + # get column type + column_type = column_data["type"] + # get column validation regex + column_validation = column_data["validation_pattern"] + # get column default value + column_default = default_value or column_data["default"] + + if column_type in ["number", "decimal"] and column_default == 0: + column_default = None + + # check if column value is not empty string + if column_value == "": + # set default value if column value is empty string + column_value = column_default + + # set column value to correct type following column type + if column_type == "number" and column_value is not None: + column_value = int(column_value) + elif column_type == "decimal" and column_value is not None: + column_value = float(column_value) + elif column_type == "bool": + column_value = column_value in ["true", "True"] + + # check if column value matches validation regex + if ( + column_value is not None and + not re.match(str(column_validation), str(column_value)) + ): + raise CreatorError( + f"Column '{column_name}' value '{column_value}' " + f"does not match validation regex '{column_validation}' \n" + f"Row data: {row_data} \n" + f"Column data: {column_data}" + ) + + return column_value + + def _pass_data_to_csv_instance( + self, instance_data, staging_dir, filename + ): + """Pass CSV representation file to instance data""" + + representation = { + "name": "csv", + "ext": "csv", + "files": filename, + "stagingDir": staging_dir, + "stagingDir_persistent": True, + } + + instance_data.update({ + "label": f"CSV: {filename}", + "representations": [representation], + "stagingDir": staging_dir, + "stagingDir_persistent": True, + }) + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_pre_create_attr_defs(self): + """Creating pre-create attributes at creator plugin. 
+ + Returns: + list: list of attribute object instances + """ + # Use same attributes as for instance attributes + attr_defs = [ + FileDef( + "csv_filepath_data", + folders=False, + extensions=[".csv"], + allow_sequences=False, + single_item=True, + label="CSV File", + ), + ] + return attr_defs diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py new file mode 100644 index 0000000000..82b109be28 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py @@ -0,0 +1,96 @@ +from pathlib import Path + +from ayon_core.pipeline import ( + CreatedInstance, +) + +from ayon_core.lib.attribute_definitions import ( + FileDef, + BoolDef, + TextDef, +) +from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class EditorialPackageCreator(TrayPublishCreator): + """Creates instance for OTIO file from published folder. + + Folder contains OTIO file and exported .mov files. Process should publish + whole folder as single `editorial_pckg` product type and (possibly) convert + .mov files into different format and copy them into `publish` `resources` + subfolder. + """ + identifier = "editorial_pckg" + label = "Editorial package" + product_type = "editorial_pckg" + description = "Publish folder with OTIO file and resources" + + # Position batch creator after simple creators + order = 120 + + conversion_enabled = False + + def apply_settings(self, project_settings): + self.conversion_enabled = ( + project_settings["traypublisher"] + ["publish"] + ["ExtractEditorialPckgConversion"] + ["conversion_enabled"] + ) + + def get_icon(self): + return "fa.folder" + + def create(self, product_name, instance_data, pre_create_data): + folder_path = pre_create_data.get("folder_path") + if not folder_path: + return + + instance_data["creator_attributes"] = { + "folder_path": (Path(folder_path["directory"]) / + Path(folder_path["filenames"][0])).as_posix(), + "conversion_enabled": pre_create_data["conversion_enabled"] + } + + # Create new instance + new_instance = CreatedInstance(self.product_type, product_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return [ + FileDef( + "folder_path", + folders=True, + single_item=True, + extensions=[], + allow_sequences=False, + label="Folder path" + ), + BoolDef("conversion_enabled", + tooltip="Convert to output defined in Settings.", + default=self.conversion_enabled, + label="Convert resources"), + ] + + def get_instance_attr_defs(self): + return [ + TextDef( + "folder_path", + label="Folder path", + disabled=True + ), + BoolDef("conversion_enabled", + tooltip="Convert to output defined in Settings.", + label="Convert resources"), + ] + + def get_detail_description(self): + return """# Publish folder with OTIO file and video clips + + Folder contains OTIO file and exported .mov files. Process should + publish whole folder as single `editorial_pckg` product type and + (possibly) convert .mov files into different format and copy them into + `publish` `resources` subfolder. 
+    """
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
new file mode 100644
index 0000000000..33536d0854
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py
@@ -0,0 +1,47 @@
+from pprint import pformat
+
+import pyblish.api
+
+from ayon_core.pipeline import publish
+
+
+class CollectCSVIngestInstancesData(
+    pyblish.api.InstancePlugin,
+    publish.AYONPyblishPluginMixin,
+    publish.ColormanagedPyblishPluginMixin
+):
+    """Collect CSV Ingest data from instance."""
+
+    label = "Collect CSV Ingest instances data"
+    order = pyblish.api.CollectorOrder + 0.1
+    hosts = ["traypublisher"]
+    families = ["csv_ingest"]
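+
+    # Illustrative shape of one item in
+    # instance.data["prepared_data_for_repres"] (filled by the CSV creator):
+    #
+    #   {
+    #       "type": "media",                # or "thumbnail"
+    #       "colorspace": "ACES - ACEScg",  # or None
+    #       "representation": {...},        # representation data dict
+    #   }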
+
+    def process(self, instance):
+        prepared_repres_data_items = instance.data[
+            "prepared_data_for_repres"]
+
+        for prep_repre_data in prepared_repres_data_items:
+            repre_type = prep_repre_data["type"]
+            colorspace = prep_repre_data["colorspace"]
+            repre_data = prep_repre_data["representation"]
+
+            if repre_type == "media":
+                # colorspace name is passed from the CSV column
+                self.set_representation_colorspace(
+                    repre_data, instance.context, colorspace
+                )
+                if colorspace is None:
+                    # TODO: implement parsing of colorspace file rules
+                    self.log.warning(
+                        "Colorspace is not defined in csv for following"
+                        f" representation: {pformat(repre_data)}"
+                    )
+            # thumbnails are skipped on purpose
+
+            instance.data["representations"].append(repre_data)
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py
new file mode 100644
index 0000000000..cb1277546c
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py
@@ -0,0 +1,58 @@
+"""Produces instance.data["editorial_pckg"] data used during integration.
+
+Requires:
+    instance.data["creator_attributes"]["folder_path"] - from creator
+
+Provides:
+    instance -> editorial_pckg (dict):
+        folder_path (str)
+        otio_path (str) - from dragged folder
+        resource_paths (list)
+
+"""
+import os
+
+import pyblish.api
+
+from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
+
+
+class CollectEditorialPackage(pyblish.api.InstancePlugin):
+    """Collects path to OTIO file and resources"""
+
+    label = "Collect Editorial Package"
+    order = pyblish.api.CollectorOrder - 0.1
+
+    hosts = ["traypublisher"]
+    families = ["editorial_pckg"]
+
+    def process(self, instance):
+        folder_path = instance.data["creator_attributes"]["folder_path"]
+        if not folder_path or not os.path.exists(folder_path):
+            self.log.info(
+                "Instance doesn't contain an existing collected folder path."
+            )
+            return
+
+        instance.data["editorial_pckg"] = {}
+        instance.data["editorial_pckg"]["folder_path"] = folder_path
+
+        otio_path, resource_paths = (
+            self._get_otio_and_resource_paths(folder_path))
+
+        instance.data["editorial_pckg"]["otio_path"] = otio_path
+        instance.data["editorial_pckg"]["resource_paths"] = resource_paths
+
+    def _get_otio_and_resource_paths(self, folder_path):
+        otio_path = None
+        resource_paths = []
+
+        file_names = os.listdir(folder_path)
+        for filename in file_names:
+            _, ext = os.path.splitext(filename)
+            file_path = os.path.join(folder_path, filename)
+            if ext == ".otio":
+                otio_path = file_path
+            elif ext in VIDEO_EXTENSIONS:
+                resource_paths.append(file_path)
+        return otio_path, resource_paths
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
index 8e29a0048d..5fbb9a6f4c 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py
@@ -1,10 +1,7 @@
 import pyblish.api
-from ayon_core.pipeline import (
-    publish,
-    registered_host
-)
 from ayon_core.lib import EnumDef
 from ayon_core.pipeline import colorspace
+from ayon_core.pipeline import publish
 from ayon_core.pipeline.publish import KnownPublishError
 
 
@@ -19,9 +16,10 @@ class CollectColorspace(pyblish.api.InstancePlugin,
     families = ["render", "plate", "reference", "image", "online"]
     enabled = False
 
-    colorspace_items = [
+    default_colorspace_items = [
         (None, "Don't override")
     ]
+    colorspace_items = list(default_colorspace_items)
     colorspace_attr_show = False
     config_items = None
 
@@ -69,14 +67,13 @@ class CollectColorspace(pyblish.api.InstancePlugin,
 
     @classmethod
     def apply_settings(cls, project_settings):
-        host = registered_host()
-        host_name = host.name
-        project_name = host.get_current_project_name()
-        config_data = colorspace.get_imageio_config(
-            project_name, host_name,
+        config_data = colorspace.get_current_context_imageio_config_preset(
             project_settings=project_settings
         )
 
+        enabled = False
+        colorspace_items = list(cls.default_colorspace_items)
+        config_items = None
         if config_data:
+            enabled = True
             filepath = config_data["path"]
             config_items = colorspace.get_ocio_config_colorspaces(filepath)
@@ -85,9 +82,11 @@ class CollectColorspace(pyblish.api.InstancePlugin,
                 include_aliases=True,
                 include_roles=True
             )
-            cls.config_items = config_items
-            cls.colorspace_items.extend(labeled_colorspaces)
-            cls.enabled = True
+            colorspace_items.extend(labeled_colorspaces)
+
+        cls.config_items = config_items
+        cls.colorspace_items = colorspace_items
+        cls.enabled = enabled
 
     @classmethod
     def get_attribute_defs(cls):
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
similarity index 64%
rename from client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py
rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
index 4d203649c7..2e564a2e4e 100644
--- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py
@@ -10,9 +10,13 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
     order = pyblish.api.CollectorOrder + 0.491
     label = "Collect Missing Frame Data From Folder"
-    families = ["plate", "pointcache",
-                "vdbcache", "online",
-                "render"]
+    families = [
+        "plate",
+        "pointcache",
+        "vdbcache",
+        "online",
+        "render",
+    ]
     hosts = ["traypublisher"]
 
     def process(self, instance):
@@ -22,16 +26,26 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
             "frameStart",
             "frameEnd",
             "handleStart",
-            "handleEnd"
+            "handleEnd",
        ):
             if key not in instance.data:
                 missing_keys.append(key)
+
+        # Skip the logic if all keys are already collected.
+        # NOTE: In editorial publishing 'folderEntity' is not filled, so
+        #   this would crash even though the data is not needed.
+        if not missing_keys:
+            return
+
         keys_set = []
         folder_attributes = instance.data["folderEntity"]["attrib"]
         for key in missing_keys:
             if key in folder_attributes:
                 instance.data[key] = folder_attributes[key]
                 keys_set.append(key)
+
         if keys_set:
-            self.log.debug(f"Frame range data {keys_set} "
-                           "has been collected from folder entity.")
+            self.log.debug(
+                f"Frame range data {keys_set} "
+                "has been collected from folder entity."
+            )
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
new file mode 100644
index 0000000000..4bdf7c0493
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py
@@ -0,0 +1,31 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+
+
+class ExtractCSVFile(publish.Extractor):
+    """Extract the source CSV file as a representation."""
+
+    label = "Extract CSV file"
+    order = pyblish.api.ExtractorOrder - 0.45
+    families = ["csv_ingest_file"]
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        csv_file_data = instance.data["csvFileData"]
+
+        representation_csv = {
+            "name": "csv_data",
+            "ext": "csv",
+            "files": csv_file_data["filename"],
+            "stagingDir": csv_file_data["staging_dir"],
+            "stagingDir_persistent": True
+        }
+
+        instance.data["representations"].append(representation_csv)
+
+        self.log.info("Added CSV file representation: {}".format(
+            representation_csv))
diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py
new file mode 100644
index 0000000000..6dd4e84704
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py
@@ -0,0 +1,232 @@
+import copy
+import os.path
+import subprocess
+
+import opentimelineio
+
+import pyblish.api
+
+from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
+from ayon_core.pipeline import publish
+
+
+class ExtractEditorialPckgConversion(publish.Extractor):
+    """Replaces movie paths in the OTIO file with rootless published paths.
+
+    Prepares movie resources for integration (adds them to `transfers`).
+    Converts .mov files according to the output definition.
+    """
+
+    label = "Extract Editorial Package"
+    order = pyblish.api.ExtractorOrder - 0.45
+    hosts = ["traypublisher"]
+    families = ["editorial_pckg"]
+
+    def process(self, instance):
+        editorial_pckg_data = instance.data.get("editorial_pckg")
+
+        otio_path = editorial_pckg_data["otio_path"]
+        otio_basename = os.path.basename(otio_path)
+        staging_dir = self.staging_dir(instance)
+
+        editorial_pckg_repre = {
+            "name": "editorial_pckg",
+            "ext": "otio",
+            "files": otio_basename,
+            "stagingDir": staging_dir,
+        }
+        otio_staging_path = os.path.join(staging_dir, otio_basename)
+
+        instance.data["representations"].append(editorial_pckg_repre)
+
+        publish_resource_folder = self._get_publish_resource_folder(instance)
+        resource_paths = editorial_pckg_data["resource_paths"]
+        transfers = self._get_transfers(resource_paths,
+                                        publish_resource_folder)
+
+        project_settings = instance.context.data["project_settings"]
+        output_def = (project_settings["traypublisher"]
+                                      ["publish"]
+                                      ["ExtractEditorialPckgConversion"]
+                                      ["output"])
+
+        conversion_enabled = (instance.data["creator_attributes"]
+                                           ["conversion_enabled"])
+
+        if conversion_enabled and output_def["ext"]:
+            transfers = self._convert_resources(output_def, transfers)
+
+        instance.data["transfers"] = transfers
+
+        source_to_rootless = self._get_resource_path_mapping(instance,
+                                                             transfers)
+
+        # read the collected OTIO file and remap its media paths before
+        # writing it to the staging directory
+        otio_data = opentimelineio.adapters.read_from_file(otio_path)
+        otio_data = self._replace_target_urls(otio_data, source_to_rootless)
+
+        opentimelineio.adapters.write_to_file(otio_data, otio_staging_path)
+
+        self.log.info("Added Editorial Package representation: {}".format(
+            editorial_pckg_repre))
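+
+    # Illustrative shape of the 'output' definition read from the project
+    # settings above (actual values are configured in settings):
+    #
+    #   {
+    #       "ext": "mp4",
+    #       "ffmpeg_args": {
+    #           "input": ["-apply_trc gamma22"],
+    #           "video_filters": [],
+    #           "audio_filters": [],
+    #           "output": ["-pix_fmt yuv420p"],
+    #       },
+    #   }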
+ """ + + label = "Extract Editorial Package" + order = pyblish.api.ExtractorOrder - 0.45 + hosts = ["traypublisher"] + families = ["editorial_pckg"] + + def process(self, instance): + editorial_pckg_data = instance.data.get("editorial_pckg") + + otio_path = editorial_pckg_data["otio_path"] + otio_basename = os.path.basename(otio_path) + staging_dir = self.staging_dir(instance) + + editorial_pckg_repre = { + 'name': "editorial_pckg", + 'ext': "otio", + 'files': otio_basename, + "stagingDir": staging_dir, + } + otio_staging_path = os.path.join(staging_dir, otio_basename) + + instance.data["representations"].append(editorial_pckg_repre) + + publish_resource_folder = self._get_publish_resource_folder(instance) + resource_paths = editorial_pckg_data["resource_paths"] + transfers = self._get_transfers(resource_paths, + publish_resource_folder) + + project_settings = instance.context.data["project_settings"] + output_def = (project_settings["traypublisher"] + ["publish"] + ["ExtractEditorialPckgConversion"] + ["output"]) + + conversion_enabled = (instance.data["creator_attributes"] + ["conversion_enabled"]) + + if conversion_enabled and output_def["ext"]: + transfers = self._convert_resources(output_def, transfers) + + instance.data["transfers"] = transfers + + source_to_rootless = self._get_resource_path_mapping(instance, + transfers) + + otio_data = editorial_pckg_data["otio_data"] + otio_data = self._replace_target_urls(otio_data, source_to_rootless) + + opentimelineio.adapters.write_to_file(otio_data, otio_staging_path) + + self.log.info("Added Editorial Package representation: {}".format( + editorial_pckg_repre)) + + def _get_publish_resource_folder(self, instance): + """Calculates publish folder and create it.""" + publish_path = self._get_published_path(instance) + publish_folder = os.path.dirname(publish_path) + publish_resource_folder = os.path.join(publish_folder, "resources") + + if not os.path.exists(publish_resource_folder): + os.makedirs(publish_resource_folder, exist_ok=True) + return publish_resource_folder + + def _get_resource_path_mapping(self, instance, transfers): + """Returns dict of {source_mov_path: rootless_published_path}.""" + replace_paths = {} + anatomy = instance.context.data["anatomy"] + for source, destination in transfers: + rootless_path = self._get_rootless(anatomy, destination) + source_file_name = os.path.basename(source) + replace_paths[source_file_name] = rootless_path + return replace_paths + + def _get_transfers(self, resource_paths, publish_resource_folder): + """Returns list of tuples (source, destination) with movie paths.""" + transfers = [] + for res_path in resource_paths: + res_basename = os.path.basename(res_path) + pub_res_path = os.path.join(publish_resource_folder, res_basename) + transfers.append((res_path, pub_res_path)) + return transfers + + def _replace_target_urls(self, otio_data, replace_paths): + """Replace original movie paths with published rootless ones.""" + for track in otio_data.tracks: + for clip in track: + # Check if the clip has a media reference + if clip.media_reference is not None: + # Access the target_url from the media reference + target_url = clip.media_reference.target_url + if not target_url: + continue + file_name = os.path.basename(target_url) + replace_path = replace_paths.get(file_name) + if replace_path: + clip.media_reference.target_url = replace_path + if clip.name == file_name: + clip.name = os.path.basename(replace_path) + + return otio_data + + def _get_rootless(self, anatomy, path): + """Try to find 
rootless {root[work]} path from `path`."""
+        success, rootless_path = anatomy.find_root_template_from_path(
+            path)
+        if not success:
+            # no configured root matches, keep the original path
+            self.log.warning(
+                f"Could not find root path for remapping '{path}'."
+            )
+            rootless_path = path
+
+        return rootless_path
+
+    def _get_published_path(self, instance):
+        """Calculates the expected `publish` folder path."""
+        # determine published path from Anatomy.
+        template_data = instance.data.get("anatomyData")
+        rep = instance.data["representations"][0]
+        template_data["representation"] = rep.get("name")
+        template_data["ext"] = rep.get("ext")
+        template_data["comment"] = None
+
+        anatomy = instance.context.data["anatomy"]
+        template_data["root"] = anatomy.roots
+        template = anatomy.get_template_item("publish", "default", "path")
+        template_filled = template.format_strict(template_data)
+        return os.path.normpath(template_filled)
+
+    def _convert_resources(self, output_def, transfers):
+        """Converts all resource files to the configured format."""
+        out_extension = output_def["ext"]
+        if not out_extension:
+            self.log.warning("No output extension configured in "
+                "ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion")  # noqa
+            return transfers
+
+        final_transfers = []
+        out_def_ffmpeg_args = output_def["ffmpeg_args"]
+        ffmpeg_input_args = [
+            value.strip()
+            for value in out_def_ffmpeg_args["input"]
+            if value.strip()
+        ]
+        ffmpeg_video_filters = [
+            value.strip()
+            for value in out_def_ffmpeg_args["video_filters"]
+            if value.strip()
+        ]
+        ffmpeg_audio_filters = [
+            value.strip()
+            for value in out_def_ffmpeg_args["audio_filters"]
+            if value.strip()
+        ]
+        ffmpeg_output_args = [
+            value.strip()
+            for value in out_def_ffmpeg_args["output"]
+            if value.strip()
+        ]
+        ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args)
+
+        generic_args = [
+            subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
+        ]
+        generic_args.extend(ffmpeg_input_args)
+        if ffmpeg_video_filters:
+            generic_args.append("-filter:v")
+            generic_args.append(
+                "\"{}\"".format(",".join(ffmpeg_video_filters)))
+
+        if ffmpeg_audio_filters:
+            generic_args.append("-filter:a")
+            generic_args.append(
+                "\"{}\"".format(",".join(ffmpeg_audio_filters)))
+
+        for source, destination in transfers:
+            base_name = os.path.basename(destination)
+            file_name, ext = os.path.splitext(base_name)
+            dest_path = os.path.join(os.path.dirname(destination),
+                                     f"{file_name}.{out_extension}")
+            final_transfers.append((source, dest_path))
+
+            all_args = copy.deepcopy(generic_args)
+            all_args.append(f"-i \"{source}\"")
+            all_args.extend(ffmpeg_output_args)  # order matters
+            all_args.append(f"\"{dest_path}\"")
+            subprcs_cmd = " ".join(all_args)
+
+            # run subprocess
+            self.log.debug("Executing: {}".format(subprcs_cmd))
+            run_subprocess(subprcs_cmd, shell=True, logger=self.log)
+        return final_transfers
+
+    def _split_ffmpeg_args(self, in_args):
+        """Ensure all entered arguments are separated into individual items.
+
+        Split each argument string on " -" to identify whether the string
+        contains one or more arguments.
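+
+        Illustrative example (derived from the splitting logic below):
+            ["-i input.mov -r 25", "-an"] -> ["-i input.mov", "-r 25", "-an"]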
+ """ + splitted_args = [] + for arg in in_args: + sub_args = arg.split(" -") + if len(sub_args) == 1: + if arg and arg not in splitted_args: + splitted_args.append(arg) + continue + + for idx, arg in enumerate(sub_args): + if idx != 0: + arg = "-" + arg + + if arg and arg not in splitted_args: + splitted_args.append(arg) + return splitted_args diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py new file mode 100644 index 0000000000..c63c4a6a73 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py @@ -0,0 +1,68 @@ +import os +import opentimelineio + +import pyblish.api +from ayon_core.pipeline import PublishValidationError + + +class ValidateEditorialPackage(pyblish.api.InstancePlugin): + """Checks that published folder contains all resources from otio + + Currently checks only by file names and expects flat structure. + It ignores path to resources in otio file as folder might be dragged in and + published from different location than it was created. + """ + + label = "Validate Editorial Package" + order = pyblish.api.ValidatorOrder - 0.49 + + hosts = ["traypublisher"] + families = ["editorial_pckg"] + + def process(self, instance): + editorial_pckg_data = instance.data.get("editorial_pckg") + if not editorial_pckg_data: + raise PublishValidationError("Editorial package not collected") + + folder_path = editorial_pckg_data["folder_path"] + + otio_path = editorial_pckg_data["otio_path"] + if not otio_path: + raise PublishValidationError( + f"Folder {folder_path} missing otio file") + + resource_paths = editorial_pckg_data["resource_paths"] + + resource_file_names = {os.path.basename(path) + for path in resource_paths} + + otio_data = opentimelineio.adapters.read_from_file(otio_path) + + target_urls = self._get_all_target_urls(otio_data) + missing_files = set() + for target_url in target_urls: + target_basename = os.path.basename(target_url) + if target_basename not in resource_file_names: + missing_files.add(target_basename) + + if missing_files: + raise PublishValidationError( + f"Otio file contains missing files `{missing_files}`.\n\n" + f"Please add them to `{folder_path}` and republish.") + + instance.data["editorial_pckg"]["otio_data"] = otio_data + + def _get_all_target_urls(self, otio_data): + target_urls = [] + + # Iterate through tracks, clips, or other elements + for track in otio_data.tracks: + for clip in track: + # Check if the clip has a media reference + if clip.media_reference is not None: + # Access the target_url from the media reference + target_url = clip.media_reference.target_url + if target_url: + target_urls.append(target_url) + + return target_urls diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py index 3a62536507..0b4f8e16c1 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py @@ -16,6 +16,7 @@ class ValidateExistingVersion( order = ValidateContentsOrder hosts = ["traypublisher"] + targets = ["local"] actions = [RepairAction] diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py index 4f11571efe..13f13b05bb 100644 --- 
a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py @@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin, label = "Validate Frame Range" hosts = ["traypublisher"] families = ["render", "plate"] + targets = ["local"] + order = ValidateContentsOrder optional = True diff --git a/client/ayon_core/hosts/unreal/lib.py b/client/ayon_core/hosts/unreal/lib.py index 37122b2096..185853a0aa 100644 --- a/client/ayon_core/hosts/unreal/lib.py +++ b/client/ayon_core/hosts/unreal/lib.py @@ -80,17 +80,21 @@ def get_engine_versions(env=None): def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path: """Get UE Editor executable path.""" ue_path = engine_path / "Engine/Binaries" + + ue_name = "UnrealEditor" + + # handle older versions of Unreal Engine + if engine_version.split(".")[0] == "4": + ue_name = "UE4Editor" + if platform.system().lower() == "windows": - if engine_version.split(".")[0] == "4": - ue_path /= "Win64/UE4Editor.exe" - elif engine_version.split(".")[0] == "5": - ue_path /= "Win64/UnrealEditor.exe" + ue_path /= f"Win64/{ue_name}.exe" elif platform.system().lower() == "linux": - ue_path /= "Linux/UE4Editor" + ue_path /= f"Linux/{ue_name}" elif platform.system().lower() == "darwin": - ue_path /= "Mac/UE4Editor" + ue_path /= f"Mac/{ue_name}" return ue_path diff --git a/client/ayon_core/hosts/unreal/ue_workers.py b/client/ayon_core/hosts/unreal/ue_workers.py index e3f8729c2e..256c0557be 100644 --- a/client/ayon_core/hosts/unreal/ue_workers.py +++ b/client/ayon_core/hosts/unreal/ue_workers.py @@ -260,11 +260,11 @@ class UEProjectGenerationWorker(UEWorker): self.failed.emit(msg, return_code) raise RuntimeError(msg) - # ensure we have PySide2 installed in engine + # ensure we have PySide2/6 installed in engine self.progress.emit(0) self.stage_begin.emit( - (f"Checking PySide2 installation... {stage_count} " + (f"Checking Qt bindings installation... {stage_count} " f" out of {stage_count}")) python_path = None if platform.system().lower() == "windows": @@ -287,11 +287,30 @@ class UEProjectGenerationWorker(UEWorker): msg = f"Unreal Python not found at {python_path}" self.failed.emit(msg, 1) raise RuntimeError(msg) - pyside_cmd = [python_path.as_posix(), - "-m", - "pip", - "install", - "pyside2"] + + pyside_version = "PySide2" + ue_version = self.ue_version.split(".") + if int(ue_version[0]) == 5 and int(ue_version[1]) >= 4: + # Use PySide6 6.6.3 because 6.7.0 had a bug + # - 'QPushButton' can't be added to 'QBoxLayout' + pyside_version = "PySide6==6.6.3" + + site_packages_prefix = python_path.parent.as_posix() + + pyside_cmd = [ + python_path.as_posix(), + "-m", "pip", + "install", + "--ignore-installed", + pyside_version, + + ] + + if platform.system().lower() == "windows": + pyside_cmd += ["--target", site_packages_prefix] + + print(f"--- Installing {pyside_version} ...") + print(" ".join(pyside_cmd)) pyside_install = subprocess.Popen(pyside_cmd, stdout=subprocess.PIPE, @@ -306,8 +325,8 @@ class UEProjectGenerationWorker(UEWorker): return_code = pyside_install.wait() if return_code and return_code != 0: - msg = ("Failed to create the project! " - "The installation of PySide2 has failed!") + msg = (f"Failed to create the project! 
"
+                   f"The installation of {pyside_version} has failed "
+                   f"with return code {return_code}!")
             self.failed.emit(msg, return_code)
             raise RuntimeError(msg)
 
diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py
index 408262ca42..e25d3479ee 100644
--- a/client/ayon_core/lib/__init__.py
+++ b/client/ayon_core/lib/__init__.py
@@ -27,6 +27,10 @@ from .local_settings import (
     get_openpype_username,
 )
 from .ayon_connection import initialize_ayon_connection
+from .cache import (
+    CacheItem,
+    NestedCacheItem,
+)
 from .events import (
     emit_event,
     register_event_callback
@@ -135,6 +139,7 @@ from .path_tools import (
 )
 
 from .ayon_info import (
+    is_in_ayon_launcher_process,
     is_running_from_build,
     is_using_ayon_console,
     is_staging_enabled,
@@ -157,6 +162,9 @@ __all__ = [
 
     "initialize_ayon_connection",
 
+    "CacheItem",
+    "NestedCacheItem",
+
     "emit_event",
     "register_event_callback",
 
@@ -241,6 +249,7 @@ __all__ = [
 
     "Logger",
 
+    "is_in_ayon_launcher_process",
     "is_running_from_build",
     "is_using_ayon_console",
     "is_staging_enabled",
diff --git a/client/ayon_core/lib/ayon_info.py b/client/ayon_core/lib/ayon_info.py
index fc09a7c90c..c4333fab95 100644
--- a/client/ayon_core/lib/ayon_info.py
+++ b/client/ayon_core/lib/ayon_info.py
@@ -1,4 +1,5 @@
 import os
+import sys
 import json
 import datetime
 import platform
@@ -25,6 +26,18 @@ def get_ayon_launcher_version():
     return content["__version__"]
 
 
+def is_in_ayon_launcher_process():
+    """Determine if current process is running from AYON launcher.
+
+    Returns:
+        bool: True if running from AYON launcher.
+
+    """
+    ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
+    executable_path = os.path.normpath(sys.executable)
+    return ayon_executable_path == executable_path
+
+
 def is_running_from_build():
     """Determine if current process is running from build or code.
 
diff --git a/client/ayon_core/lib/cache.py b/client/ayon_core/lib/cache.py
new file mode 100644
index 0000000000..dc83520f76
--- /dev/null
+++ b/client/ayon_core/lib/cache.py
@@ -0,0 +1,250 @@
+import time
+import collections
+
+InitInfo = collections.namedtuple(
+    "InitInfo",
+    ["default_factory", "lifetime"]
+)
+
+
+def _default_factory_func():
+    return None
+
+
+class CacheItem:
+    """Simple cache item with lifetime and default factory for its value.
+
+    The default factory should return the default value that is used on
+    init and on reset.
+
+    Args:
+        default_factory (Optional[callable]): Function that returns default
+            value used on init and on reset.
+        lifetime (Optional[int]): Lifetime of the cache data in seconds.
+            Default lifetime is 120 seconds.
+
+    """
+    def __init__(self, default_factory=None, lifetime=None):
+        if lifetime is None:
+            lifetime = 120
+        self._lifetime = lifetime
+        self._last_update = None
+        if default_factory is None:
+            default_factory = _default_factory_func
+        self._default_factory = default_factory
+        self._data = default_factory()
+
+    @property
+    def is_valid(self):
+        """Is cache valid to use.
+
+        Returns:
+            bool: True if cache is valid, False otherwise.
+
+        """
+        if self._last_update is None:
+            return False
+
+        return (time.time() - self._last_update) < self._lifetime
+
+    def set_lifetime(self, lifetime):
+        """Change lifetime of cache item.
+
+        Args:
+            lifetime (int): Lifetime of the cache data in seconds.
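+
+        Example (illustrative):
+            >>> cache = CacheItem()
+            >>> cache.update_data(1)
+            >>> cache.set_lifetime(0)
+            >>> cache.is_valid
+            False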
+ """ + + self._lifetime = lifetime + + def set_invalid(self): + """Set cache as invalid.""" + + self._last_update = None + + def reset(self): + """Set cache as invalid and reset data.""" + + self._last_update = None + self._data = self._default_factory() + + def get_data(self): + """Receive cached data. + + Returns: + Any: Any data that are cached. + + """ + return self._data + + def update_data(self, data): + """Update cache data. + + Args: + data (Any): Any data that are cached. + + """ + self._data = data + self._last_update = time.time() + + +class NestedCacheItem: + """Helper for cached items stored in nested structure. + + Example: + >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) + >>> cache["a"]["b"].is_valid + False + >>> cache["a"]["b"].get_data() + 0 + >>> cache["a"]["b"] = 1 + >>> cache["a"]["b"].is_valid + True + >>> cache["a"]["b"].get_data() + 1 + >>> cache.reset() + >>> cache["a"]["b"].is_valid + False + + Args: + levels (int): Number of nested levels where read cache is stored. + default_factory (Optional[callable]): Function that returns default + value used on init and on reset. + lifetime (Optional[int]): Lifetime of the cache data in seconds. + Default value is based on default value of 'CacheItem'. + _init_info (Optional[InitInfo]): Private argument. Init info for + nested cache where created from parent item. + + """ + def __init__( + self, levels=1, default_factory=None, lifetime=None, _init_info=None + ): + if levels < 1: + raise ValueError("Nested levels must be greater than 0") + self._data_by_key = {} + if _init_info is None: + _init_info = InitInfo(default_factory, lifetime) + self._init_info = _init_info + self._levels = levels + + def __getitem__(self, key): + """Get cached data. + + Args: + key (str): Key of the cache item. + + Returns: + Union[NestedCacheItem, CacheItem]: Cache item. + + """ + cache = self._data_by_key.get(key) + if cache is None: + if self._levels > 1: + cache = NestedCacheItem( + levels=self._levels - 1, + _init_info=self._init_info + ) + else: + cache = CacheItem( + self._init_info.default_factory, + self._init_info.lifetime + ) + self._data_by_key[key] = cache + return cache + + def __setitem__(self, key, value): + """Update cached data. + + Args: + key (str): Key of the cache item. + value (Any): Any data that are cached. + + """ + if self._levels > 1: + raise AttributeError(( + "{} does not support '__setitem__'. Lower nested level by {}" + ).format(self.__class__.__name__, self._levels - 1)) + cache = self[key] + cache.update_data(value) + + def get(self, key): + """Get cached data. + + Args: + key (str): Key of the cache item. + + Returns: + Union[NestedCacheItem, CacheItem]: Cache item. + + """ + return self[key] + + def cached_count(self): + """Amount of cached items. + + Returns: + int: Amount of cached items. + + """ + return len(self._data_by_key) + + def clear_key(self, key): + """Clear cached item by key. + + Args: + key (str): Key of the cache item. + + """ + self._data_by_key.pop(key, None) + + def clear_invalid(self): + """Clear all invalid cache items. + + Note: + To clear all cache items use 'reset'. 
+ + """ + changed = {} + children_are_nested = self._levels > 1 + for key, cache in tuple(self._data_by_key.items()): + if children_are_nested: + output = cache.clear_invalid() + if output: + changed[key] = output + if not cache.cached_count(): + self._data_by_key.pop(key) + elif not cache.is_valid: + changed[key] = cache.get_data() + self._data_by_key.pop(key) + return changed + + def reset(self): + """Reset cache. + + Note: + To clear only invalid cache items use 'clear_invalid'. + + """ + self._data_by_key = {} + + def set_lifetime(self, lifetime): + """Change lifetime of all children cache items. + + Args: + lifetime (int): Lifetime of the cache data in seconds. + + """ + self._init_info.lifetime = lifetime + for cache in self._data_by_key.values(): + cache.set_lifetime(lifetime) + + @property + def is_valid(self): + """Raise reasonable error when called on wrong level. + + Raises: + AttributeError: If called on nested cache item. + + """ + raise AttributeError(( + "{} does not support 'is_valid'. Lower nested level by '{}'" + ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/modules/deadline/__init__.py b/client/ayon_core/modules/deadline/__init__.py index 5631e501d8..683d8dbe4a 100644 --- a/client/ayon_core/modules/deadline/__init__.py +++ b/client/ayon_core/modules/deadline/__init__.py @@ -1,6 +1,8 @@ from .deadline_module import DeadlineModule +from .version import __version__ __all__ = ( "DeadlineModule", + "__version__" ) diff --git a/client/ayon_core/modules/deadline/abstract_submit_deadline.py b/client/ayon_core/modules/deadline/abstract_submit_deadline.py index 2e0518ae20..564966b6a0 100644 --- a/client/ayon_core/modules/deadline/abstract_submit_deadline.py +++ b/client/ayon_core/modules/deadline/abstract_submit_deadline.py @@ -29,15 +29,11 @@ from ayon_core.pipeline.publish.lib import ( JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) -# TODO both 'requests_post' and 'requests_get' should not set 'verify' based -# on environment variable. This should be done in a more controlled way, -# e.g. each deadline url could have checkbox to enabled/disable -# ssl verification. def requests_post(*args, **kwargs): """Wrap request post method. - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline server is + Disabling SSL certificate validation if ``verify`` kwarg is set to False. + This is useful when Deadline server is running with self-signed certificates and its certificate is not added to trusted certificates on client machines. @@ -46,9 +42,9 @@ def requests_post(*args, **kwargs): of defense SSL is providing, and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", - True) else True # noqa + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # explicit cast to tuple # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.post(*args, **kwargs) @@ -57,8 +53,8 @@ def requests_post(*args, **kwargs): def requests_get(*args, **kwargs): """Wrap request get method. - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline server is + Disabling SSL certificate validation if ``verify`` kwarg is set to False. + This is useful when Deadline server is running with self-signed certificates and its certificate is not added to trusted certificates on client machines. 
@@ -67,9 +63,9 @@ def requests_get(*args, **kwargs): of defense SSL is providing, and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", - True) else True # noqa + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.get(*args, **kwargs) @@ -434,9 +430,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """Plugin entry point.""" self._instance = instance context = instance.context - self._deadline_url = context.data.get("defaultDeadline") - self._deadline_url = instance.data.get( - "deadlineUrl", self._deadline_url) + self._deadline_url = instance.data["deadline"]["url"] assert self._deadline_url, "Requires Deadline Webservice URL" @@ -460,7 +454,9 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, self.plugin_info = self.get_plugin_info() self.aux_files = self.get_aux_files() - job_id = self.process_submission() + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + job_id = self.process_submission(auth, verify) self.log.info("Submitted job to Deadline: {}.".format(job_id)) # TODO: Find a way that's more generic and not render type specific @@ -473,10 +469,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, job_info=render_job_info, plugin_info=render_plugin_info ) - render_job_id = self.submit(payload) + render_job_id = self.submit(payload, auth, verify) self.log.info("Render job id: %s", render_job_id) - def process_submission(self): + def process_submission(self, auth=None, verify=True): """Process data for submission. This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload @@ -487,7 +483,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ payload = self.assemble_payload() - return self.submit(payload) + return self.submit(payload, auth, verify) @abstractmethod def get_job_info(self): @@ -577,7 +573,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, "AuxFiles": aux_files or self.aux_files } - def submit(self, payload): + def submit(self, payload, auth, verify): """Submit payload to Deadline API end-point. This takes payload in the form of JSON file and POST it to @@ -585,6 +581,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, Args: payload (dict): dict to become json in deadline submission. + auth (tuple): (username, password) + verify (bool): verify SSL certificate if present Returns: str: resulting Deadline job id. 
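+
+        Example (illustrative):
+            job_id = self.submit(payload, auth=("user", "secret"),
+                                 verify=False)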
@@ -594,7 +592,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ url = "{}/api/jobs".format(self._deadline_url) - response = requests_post(url, json=payload) + response = requests_post( + url, json=payload, auth=auth, verify=verify) if not response.ok: self.log.error("Submission failed!") self.log.error(response.status_code) diff --git a/client/ayon_core/modules/deadline/deadline_module.py b/client/ayon_core/modules/deadline/deadline_module.py index c0ba83477e..b1089bbfe2 100644 --- a/client/ayon_core/modules/deadline/deadline_module.py +++ b/client/ayon_core/modules/deadline/deadline_module.py @@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths): def initialize(self, studio_settings): # This module is always enabled - deadline_urls = {} + deadline_servers_info = {} enabled = self.name in studio_settings if enabled: deadline_settings = studio_settings[self.name] - deadline_urls = { - url_item["name"]: url_item["value"] + deadline_servers_info = { + url_item["name"]: url_item for url_item in deadline_settings["deadline_urls"] } - if enabled and not deadline_urls: + if enabled and not deadline_servers_info: enabled = False self.log.warning(( "Deadline Webservice URLs are not specified. Disabling addon." )) self.enabled = enabled - self.deadline_urls = deadline_urls + self.deadline_servers_info = deadline_servers_info def get_plugin_paths(self): """Deadline plugin paths.""" @@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths): } @staticmethod - def get_deadline_pools(webservice, log=None): + def get_deadline_pools(webservice, auth=None, log=None): """Get pools from Deadline. Args: webservice (str): Server url. - log (Logger) + auth (Optional[Tuple[str, str]]): Tuple containing username, + password + log (Optional[Logger]): Logger to log errors to, if provided. Returns: - list: Pools. + List[str]: Pools. Throws: RuntimeError: If deadline webservice is unreachable. @@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths): argument = "{}/api/pools?NamesOnly=true".format(webservice) try: - response = requests_get(argument) + kwargs = {} + if auth: + kwargs["auth"] = auth + response = requests_get(argument, **kwargs) except requests.exceptions.ConnectionError as exc: msg = 'Cannot connect to DL web service {}'.format(webservice) log.error(msg) diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index ea4b7a213e..22022831a0 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" # Run before collect_render. 
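+    # For illustration, after this collector (and the credentials collector
+    #   that runs after it) an instance carries:
+    #       instance.data["deadline"] = {
+    #           "url": "<webservice url>",
+    #           "require_authentication": bool,
+    #           "auth": ("username", "password") or None,
+    #           "verify": bool,
+    #       }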
-    order = pyblish.api.CollectorOrder + 0.005
+    order = pyblish.api.CollectorOrder + 0.225
     label = "Deadline Webservice from the Instance"
-    families = ["rendering", "renderlayer"]
-    hosts = ["maya"]
+    targets = ["local"]
+    families = ["render",
+                "rendering",
+                "render.farm",
+                "renderFarm",
+                "renderlayer",
+                "maxrender",
+                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "mantra_rop",
+                "karma_rop",
+                "vray_rop",
+                "publish.hou",
+                "image"]  # for Fusion
 
     def process(self, instance):
-        instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
-        instance.data["deadlineUrl"] = \
-            instance.data["deadlineUrl"].strip().rstrip("/")
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        if not instance.data.get("deadline"):
+            instance.data["deadline"] = {}
+
+        # TODO: remove this host-specific branch, all hosts should
+        #   use the same logic
+        host_name = instance.context.data["hostName"]
+        if host_name == "maya":
+            deadline_url = self._collect_deadline_url(instance)
+        else:
+            deadline_url = (instance.data.get("deadlineUrl") or  # backwards
+                            instance.data.get("deadline", {}).get("url"))
+        if deadline_url:
+            instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/")
+        else:
+            instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"]  # noqa
 
         self.log.debug(
-            "Using {} for submission.".format(instance.data["deadlineUrl"]))
+            "Using {} for submission".format(instance.data["deadline"]["url"]))
 
     def _collect_deadline_url(self, render_instance):
         # type: (pyblish.api.Instance) -> str
@@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
             ["project_settings"]
             ["deadline"]
         )
-
-        default_server = render_instance.context.data["defaultDeadline"]
+        default_server_url = (render_instance.context.data["deadline"]
+                                                          ["defaultUrl"])
         # QUESTION How and where is this is set? Should be removed?
         instance_server = render_instance.data.get("deadlineServers")
         if not instance_server:
             self.log.debug("Using default server.")
-            return default_server
+            return default_server_url
 
         # Get instance server as sting.
         if isinstance(instance_server, int):
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
index b7ca227b01..9238e0ed95 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
     """
 
     # Run before collect_deadline_server_instance.
-    order = pyblish.api.CollectorOrder + 0.0025
+    order = pyblish.api.CollectorOrder + 0.200
     label = "Default Deadline Webservice"
-
-    pass_mongo_url = False
+    targets = ["local"]
 
     def process(self, context):
         try:
@@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
             deadline_settings = context.data["project_settings"]["deadline"]
 
         deadline_server_name = deadline_settings["deadline_server"]
-        deadline_webservice = None
+        dl_server_info = None
         if deadline_server_name:
-            deadline_webservice = deadline_module.deadline_urls.get(
+            dl_server_info = deadline_module.deadline_servers_info.get(
                 deadline_server_name)
 
-        default_deadline_webservice = deadline_module.deadline_urls["default"]
-        deadline_webservice = (
-            deadline_webservice
-            or default_deadline_webservice
-        )
+        if dl_server_info:
+            deadline_url = dl_server_info["value"]
+        else:
+            default_dl_server_info = (
+                deadline_module.deadline_servers_info["default"])
+            deadline_url = default_dl_server_info["value"]
 
-        context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/")  # noqa
+        context.data["deadline"] = {}
+        context.data["deadline"]["defaultUrl"] = (
+            deadline_url.strip().rstrip("/"))
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
new file mode 100644
index 0000000000..99d75ecb9e
--- /dev/null
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""Collect user credentials
+
+Requires:
+    context -> project_settings
+    instance.data["deadline"]["url"]
+
+Provides:
+    instance.data["deadline"] -> require_authentication (bool)
+    instance.data["deadline"] -> auth (tuple (str, str)) -
+        (username, password) or None
+"""
+import pyblish.api
+
+from ayon_api import get_server_api_connection
+from ayon_core.modules.deadline.deadline_module import DeadlineModule
+from ayon_core.modules.deadline import __version__
+
+
+class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin):
+    """Collects username and password for the artist if Deadline
+    requires authentication.
+    """
+    order = pyblish.api.CollectorOrder + 0.250
+    label = "Collect Deadline User Credentials"
+
+    targets = ["local"]
+
+    hosts = ["aftereffects",
+             "blender",
+             "fusion",
+             "harmony",
+             "nuke",
+             "maya",
+             "max",
+             "houdini"]
+
+    families = ["render",
+                "rendering",
+                "render.farm",
+                "renderFarm",
+                "renderlayer",
+                "maxrender",
+                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "mantra_rop",
+                "karma_rop",
+                "vray_rop",
+                "publish.hou"]
+
+    def process(self, instance):
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        collected_deadline_url = instance.data["deadline"]["url"]
+        if not collected_deadline_url:
+            raise ValueError("Instance doesn't have '[deadline][url]'.")
+        context_data = instance.context.data
+        deadline_settings = context_data["project_settings"]["deadline"]
+
+        deadline_server_name = None
+        # deadline url might be set directly on the instance, so the
+        #   matching server metadata must be looked up by url
+        for deadline_info in deadline_settings["deadline_urls"]:
+            dl_settings_url = deadline_info["value"].strip().rstrip("/")
+            if dl_settings_url == collected_deadline_url:
+                deadline_server_name = deadline_info["name"]
+                break
+
+        if not deadline_server_name:
+            raise ValueError(f"Collected {collected_deadline_url} doesn't "
+                             "match any site configured in Studio Settings")
+
+        
instance.data["deadline"]["require_authentication"] = ( + deadline_info["require_authentication"] + ) + instance.data["deadline"]["auth"] = None + + instance.data["deadline"]["verify"] = ( + not deadline_info["not_verify_ssl"]) + + if not deadline_info["require_authentication"]: + return + # TODO import 'get_addon_site_settings' when available + # in public 'ayon_api' + local_settings = get_server_api_connection().get_addon_site_settings( + DeadlineModule.name, __version__) + local_settings = local_settings["local_settings"] + for server_info in local_settings: + if deadline_server_name == server_info["server_name"]: + instance.data["deadline"]["auth"] = (server_info["username"], + server_info["password"]) diff --git a/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml new file mode 100644 index 0000000000..eec05df08a --- /dev/null +++ b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml @@ -0,0 +1,17 @@ + + + + Deadline Authentication + +## Deadline authentication is required + +This project has set in Settings that Deadline requires authentication. + +### How to repair? + +Please go to Ayon Server > Site Settings and provide your Deadline username and password. +In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator. + + + + \ No newline at end of file diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py index ab342c1a9d..311dbcedd5 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py @@ -174,7 +174,9 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, instance.data["toBeRenderedOn"] = "deadline" payload = self.assemble_payload() - return self.submit(payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + return self.submit(payload, auth=auth, verify=verify) def from_published_scene(self): """ diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py index 1fae23c9b2..a17bf0c3ef 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -2,9 +2,10 @@ import os import re import json import getpass -import requests import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post + class CelactionSubmitDeadline(pyblish.api.InstancePlugin): """Submit CelAction2D scene to Deadline @@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -196,8 +193,11 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): self.expected_files(instance, render_path) 
self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - - response = requests.post(self.deadline_url, json=payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(self.deadline_url, json=payload, + auth=auth, + verify=verify) if not response.ok: self.log.error( diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py index e3a4cd8030..6c70119628 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py @@ -2,17 +2,13 @@ import os import json import getpass -import requests - import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline.publish import ( AYONPyblishPluginMixin ) -from ayon_core.lib import ( - BoolDef, - NumberDef, -) +from ayon_core.lib import NumberDef class FusionSubmitDeadline( @@ -64,11 +60,6 @@ class FusionSubmitDeadline( decimals=0, minimum=1, maximum=10 - ), - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" ) ] @@ -80,10 +71,6 @@ class FusionSubmitDeadline( attribute_values = self.get_attr_values_from_data( instance.data) - # add suspend_publish attributeValue to instance data - instance.data["suspend_publish"] = attribute_values[ - "suspend_publish"] - context = instance.context key = "__hasRun{}".format(self.__class__.__name__) @@ -94,11 +81,7 @@ class FusionSubmitDeadline( from ayon_core.hosts.fusion.api.lib import get_frame_path - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" # Collect all saver instances in context that are to be rendered @@ -258,7 +241,9 @@ class FusionSubmitDeadline( # E.g. 
http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(deadline_url) - response = requests.post(url, json=payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(url, json=payload, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index b640869706..1e9846df0c 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from ayon_core.lib import ( is_in_tests, - BoolDef, TextDef, NumberDef ) @@ -108,15 +107,10 @@ class HoudiniSubmitDeadline( priority = 50 chunk_size = 1 group = "" - + @classmethod def get_attribute_defs(cls): return [ - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" - ), NumberDef( "priority", label="Priority", @@ -222,7 +216,7 @@ class HoudiniSubmitDeadline( job_info.Pool = instance.data.get("primaryPool") job_info.SecondaryPool = instance.data.get("secondaryPool") - + if split_render_job and is_export_job: job_info.Priority = attribute_values.get( "export_priority", self.export_priority @@ -353,6 +347,11 @@ class HoudiniSubmitDeadline( return attr.asdict(plugin_info) def process(self, instance): + if not instance.data["farm"]: + self.log.debug("Render on farm is disabled. " + "Skipping deadline submission.") + return + super(HoudiniSubmitDeadline, self).process(instance) # TODO: Avoid the need for this logic here, needed for submit publish diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py index cba05f6948..ababb01285 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py @@ -181,17 +181,27 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, self.log.debug("Submitting 3dsMax render..") project_settings = instance.context.data["project_settings"] + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] if instance.data.get("multiCamera"): self.log.debug("Submitting jobs for multiple cameras..") payload = self._use_published_name_for_multiples( payload_data, project_settings) job_infos, plugin_infos = payload for job_info, plugin_info in zip(job_infos, plugin_infos): - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit( + self.assemble_payload(job_info, plugin_info), + auth=auth, + verify=verify + ) else: payload = self._use_published_name(payload_data, project_settings) job_info, plugin_info = payload - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit( + self.assemble_payload(job_info, plugin_info), + auth=auth, + verify=verify + ) def _use_published_name(self, data, project_settings): # Not all hosts can import these modules. 
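+    # Illustrative sketch of the submission pattern shared by the host
+    #   submitters in this change ('auth'/'verify' are filled by the
+    #   'CollectDeadlineUserCredentials' collector):
+    #
+    #       auth = instance.data["deadline"]["auth"]
+    #       verify = instance.data["deadline"]["verify"]
+    #       self.submit(self.assemble_payload(job_info, plugin_info),
+    #                   auth=auth, verify=verify)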
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
index 0300b12104..f1bc1cb2be 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py
@@ -292,7 +292,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
 
         return plugin_payload
 
-    def process_submission(self):
+    def process_submission(self, auth=None, verify=True):
         from maya import cmds
         instance = self._instance
 
@@ -332,7 +332,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
         if "vrayscene" in instance.data["families"]:
             self.log.debug("Submitting V-Ray scene render..")
             vray_export_payload = self._get_vray_export_payload(payload_data)
-            export_job = self.submit(vray_export_payload)
+
+            export_job = self.submit(vray_export_payload,
+                                     auth=auth,
+                                     verify=verify)
 
             payload = self._get_vray_render_payload(payload_data)
 
@@ -351,7 +354,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
         else:
             # Submit main render job
             job_info, plugin_info = payload
-            self.submit(self.assemble_payload(job_info, plugin_info))
+            self.submit(self.assemble_payload(job_info, plugin_info),
+                        auth=auth,
+                        verify=verify)
 
     def _tile_render(self, payload):
         """Submit as tile render per frame with dependent assembly jobs."""
@@ -451,7 +456,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
         # Submit frame tile jobs
         frame_tile_job_id = {}
         for frame, tile_job_payload in frame_payloads.items():
-            job_id = self.submit(tile_job_payload)
+            job_id = self.submit(tile_job_payload,
+                                 instance.data["deadline"]["auth"],
+                                 instance.data["deadline"]["verify"])
             frame_tile_job_id[frame] = job_id
 
         # Define assembly payloads
@@ -554,12 +560,18 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
         # Submit assembly jobs
         assembly_job_ids = []
         num_assemblies = len(assembly_payloads)
+        auth = instance.data["deadline"]["auth"]
+        verify = instance.data["deadline"]["verify"]
         for i, payload in enumerate(assembly_payloads):
             self.log.debug(
                 "submitting assembly job {} of {}".format(i + 1,
                                                           num_assemblies)
             )
-            assembly_job_id = self.submit(payload)
+            assembly_job_id = self.submit(
+                payload,
+                auth=auth,
+                verify=verify
+            )
             assembly_job_ids.append(assembly_job_id)
 
         instance.data["assemblySubmissionJobs"] = assembly_job_ids
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
index d70cb75bf3..db35c2ae67 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py
@@ -4,9 +4,9 @@ import json
 import getpass
 from datetime import datetime
 
-import requests
 import pyblish.api
 
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
 from ayon_core.pipeline.publish import (
     AYONPyblishPluginMixin
 )
@@ -76,11 +76,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
                 default=cls.use_gpu,
                 label="Use GPU"
             ),
-            BoolDef(
-                "suspend_publish",
-                default=False,
-                label="Suspend publish"
-            ),
             BoolDef(
                 "workfile_dependency",
                 default=cls.workfile_dependency,
@@ -100,20 +95,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
         instance.data["attributeValues"] = self.get_attr_values_from_data(
             instance.data)
 
-        # add suspend_publish attributeValue to instance data
-        
instance.data["suspend_publish"] = instance.data["attributeValues"][ - "suspend_publish"] - families = instance.data["families"] node = instance.data["transientData"]["node"] context = instance.context - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -436,7 +423,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - response = requests.post(self.deadline_url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(self.deadline_url, + json=payload, + timeout=10, + auth=auth, + verify=verify) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py index 4e4657d886..103f1355da 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py @@ -5,10 +5,10 @@ import json import re from copy import deepcopy -import requests import ayon_api import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline import publish from ayon_core.lib import EnumDef, is_in_tests from ayon_core.pipeline.version_start import get_versioning_start @@ -147,9 +147,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, instance_settings = self.get_attr_values_from_data(instance.data) initial_status = instance_settings.get("publishJobState", "Active") - # TODO: Remove this backwards compatibility of `suspend_publish` - if instance.data.get("suspend_publish"): - initial_status = "Suspended" args = [ "--headless", @@ -212,7 +209,10 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, self.log.debug("Submitting Deadline publish job ...") url = "{}/api/jobs".format(self.deadline_url) - response = requests.post(url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post( + url, json=payload, timeout=10, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) @@ -344,11 +344,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, deadline_publish_job_id = None if submission_type == "deadline": - # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") + self.deadline_url = instance.data["deadline"]["url"] assert self.deadline_url, "Requires Deadline Webservice URL" deadline_publish_job_id = \ @@ -356,7 +352,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, # Inject deadline url to instances. 
for inst in instances: - inst["deadlineUrl"] = self.deadline_url + if "deadline" not in inst: + inst["deadline"] = {} + inst["deadline"] = instance.data["deadline"] # publish job file publish_job = { diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py index f3b68b4258..b2cc554de4 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py @@ -5,11 +5,11 @@ import json import re from copy import deepcopy -import requests import clique import ayon_api import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline import publish from ayon_core.lib import EnumDef, is_in_tests from ayon_core.pipeline.version_start import get_versioning_start @@ -88,9 +88,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, hosts = ["fusion", "max", "maya", "nuke", "houdini", "celaction", "aftereffects", "harmony", "blender"] - families = ["render.farm", "render.frames_farm", - "prerender.farm", "prerender.frames_farm", - "renderlayer", "imagesequence", + families = ["render", "render.farm", "render.frames_farm", + "prerender", "prerender.farm", "prerender.frames_farm", + "renderlayer", "imagesequence", "image", "vrayscene", "maxrender", "arnold_rop", "mantra_rop", "karma_rop", "vray_rop", @@ -224,9 +224,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, instance_settings = self.get_attr_values_from_data(instance.data) initial_status = instance_settings.get("publishJobState", "Active") - # TODO: Remove this backwards compatibility of `suspend_publish` - if instance.data.get("suspend_publish"): - initial_status = "Suspended" args = [ "--headless", @@ -306,7 +303,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, self.log.debug("Submitting Deadline publish job ...") url = "{}/api/jobs".format(self.deadline_url) - response = requests.post(url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post( + url, json=payload, timeout=10, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) @@ -314,7 +314,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, return deadline_publish_job_id - def process(self, instance): # type: (pyblish.api.Instance) -> None """Process plugin. @@ -461,18 +460,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, } # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") + self.deadline_url = instance.data["deadline"]["url"] assert self.deadline_url, "Requires Deadline Webservice URL" deadline_publish_job_id = \ self._submit_deadline_post_job(instance, render_job, instances) - # Inject deadline url to instances. 
+        # Inject deadline info into instances so the Deadline job
+        #   can be queried for its job id (e.g. when overriding)
         for inst in instances:
-            inst["deadlineUrl"] = self.deadline_url
+            inst["deadline"] = instance.data["deadline"]
 
         # publish job file
         publish_job = {
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
index a7b300beff..8fffd47786 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
@@ -1,5 +1,7 @@
 import pyblish.api
 
+from ayon_core.pipeline import PublishXmlValidationError
+
 from openpype_modules.deadline.abstract_submit_deadline import requests_get
 
 
@@ -8,27 +10,42 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
 
     label = "Validate Deadline Web Service"
     order = pyblish.api.ValidatorOrder
-    hosts = ["maya", "nuke"]
-    families = ["renderlayer", "render"]
+    hosts = ["maya", "nuke", "aftereffects", "harmony", "fusion"]
+    families = ["renderlayer", "render", "render.farm"]
 
     # cache
     responses = {}
 
     def process(self, instance):
-        # get default deadline webservice url from deadline module
-        deadline_url = instance.context.data["defaultDeadline"]
-        # if custom one is set in instance, use that
-        if instance.data.get("deadlineUrl"):
-            deadline_url = instance.data.get("deadlineUrl")
-            self.log.debug(
-                "We have deadline URL on instance {}".format(deadline_url)
-            )
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        deadline_url = instance.data["deadline"]["url"]
         assert deadline_url, "Requires Deadline Webservice URL"
 
+        kwargs = {}
+        if instance.data["deadline"]["require_authentication"]:
+            auth = instance.data["deadline"]["auth"]
+            # 'auth' may be None when no credentials were collected
+            if not auth or not auth[0]:
+                raise PublishXmlValidationError(
+                    self,
+                    "Deadline requires authentication. "
+                    "At least username is required to be set in "
+                    "Site Settings.")
+            kwargs["auth"] = auth
+
         if deadline_url not in self.responses:
            self.responses[deadline_url] = requests_get(deadline_url, **kwargs)
 
         response = self.responses[deadline_url]
+        if response.status_code == 401:
+            raise PublishXmlValidationError(
+                self,
+                "Deadline requires authentication. "
+                "Provided credentials are not working. 
" + "Please change them in Site Settings") assert response.ok, "Response must be ok" assert response.text.startswith("Deadline Web Service "), ( "Web service did not respond with 'Deadline Web Service'" diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py index 2feb044cf1..2fb511bf51 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -37,8 +37,9 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, self.log.debug("Skipping local instance.") return - deadline_url = self.get_deadline_url(instance) - pools = self.get_pools(deadline_url) + deadline_url = instance.data["deadline"]["url"] + pools = self.get_pools(deadline_url, + instance.data["deadline"].get("auth")) invalid_pools = {} primary_pool = instance.data.get("primaryPool") @@ -61,22 +62,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, formatting_data={"pools_str": ", ".join(pools)} ) - def get_deadline_url(self, instance): - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - if instance.data.get("deadlineUrl"): - # if custom one is set in instance, use that - deadline_url = instance.data.get("deadlineUrl") - return deadline_url - - def get_pools(self, deadline_url): + def get_pools(self, deadline_url, auth): if deadline_url not in self.pools_per_url: self.log.debug( "Querying available pools for Deadline url: {}".format( deadline_url) ) pools = DeadlineModule.get_deadline_pools(deadline_url, + auth=auth, log=self.log) + # some DL return "none" as a pool name + if "none" not in pools: + pools.append("none") self.log.info("Available pools: {}".format(pools)) self.pools_per_url[deadline_url] = pools diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index 6263526d5c..83e867408c 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -199,16 +199,16 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin): (dict): Job info from Deadline """ - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" url = "{}/api/jobs?JobID={}".format(deadline_url, job_id) try: - response = requests_get(url) + kwargs = {} + auth = instance.data["deadline"]["auth"] + if auth: + kwargs["auth"] = auth + response = requests_get(url, **kwargs) except requests.exceptions.ConnectionError: self.log.error("Deadline is not accessible at " "{}".format(deadline_url)) diff --git a/server_addon/deadline/server/version.py b/client/ayon_core/modules/deadline/version.py similarity index 100% rename from server_addon/deadline/server/version.py rename to client/ayon_core/modules/deadline/version.py diff --git a/client/ayon_core/modules/royalrender/api.py b/client/ayon_core/modules/royalrender/api.py index a69f88c43c..ef715811c5 100644 --- 
a/client/ayon_core/modules/royalrender/api.py +++ b/client/ayon_core/modules/royalrender/api.py @@ -7,7 +7,7 @@ from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths from .rr_job import SubmitFile -from .rr_job import RRjob, SubmitterParameter # noqa F401 +from .rr_job import RRJob, SubmitterParameter # noqa F401 class Api: diff --git a/client/ayon_core/pipeline/__init__.py b/client/ayon_core/pipeline/__init__.py index d1a181a353..8fd00ee6b6 100644 --- a/client/ayon_core/pipeline/__init__.py +++ b/client/ayon_core/pipeline/__init__.py @@ -97,6 +97,15 @@ from .context_tools import ( get_current_folder_path, get_current_task_name ) + +from .workfile import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + install = install_host uninstall = uninstall_host @@ -198,6 +207,13 @@ __all__ = ( "get_current_folder_path", "get_current_task_name", + # Workfile templates + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", + # Backwards compatible function names "install", "uninstall", diff --git a/client/ayon_core/pipeline/anatomy/anatomy.py b/client/ayon_core/pipeline/anatomy/anatomy.py index 2aa8eeddbc..98bbaa9bdc 100644 --- a/client/ayon_core/pipeline/anatomy/anatomy.py +++ b/client/ayon_core/pipeline/anatomy/anatomy.py @@ -3,11 +3,16 @@ import re import copy import platform import collections -import time import ayon_api -from ayon_core.lib import Logger, get_local_site_id, StringTemplate +from ayon_core.lib import ( + Logger, + get_local_site_id, + StringTemplate, + CacheItem, + NestedCacheItem, +) from ayon_core.addon import AddonsManager from .exceptions import RootCombinationError, ProjectNotSet @@ -397,62 +402,11 @@ class BaseAnatomy(object): ) -class CacheItem: - """Helper to cache data. - - Helper does not handle refresh of data and does not mark data as outdated. - Who uses the object should check of outdated state on his own will. - """ - - default_lifetime = 10 - - def __init__(self, lifetime=None): - self._data = None - self._cached = None - self._lifetime = lifetime or self.default_lifetime - - @property - def data(self): - """Cached data/object. - - Returns: - Any: Whatever was cached. - """ - - return self._data - - @property - def is_outdated(self): - """Item has outdated cache. - - Lifetime of cache item expired or was not yet set. - - Returns: - bool: Item is outdated. - """ - - if self._cached is None: - return True - return (time.time() - self._cached) > self._lifetime - - def update_data(self, data): - """Update cache of data. - - Args: - data (Any): Data to cache. 
- """ - - self._data = data - self._cached = time.time() - - class Anatomy(BaseAnatomy): - _sitesync_addon_cache = CacheItem() - _project_cache = collections.defaultdict(CacheItem) - _default_site_id_cache = collections.defaultdict(CacheItem) - _root_overrides_cache = collections.defaultdict( - lambda: collections.defaultdict(CacheItem) - ) + _project_cache = NestedCacheItem(lifetime=10) + _sitesync_addon_cache = CacheItem(lifetime=60) + _default_site_id_cache = NestedCacheItem(lifetime=60) + _root_overrides_cache = NestedCacheItem(2, lifetime=60) def __init__( self, project_name=None, site_name=None, project_entity=None @@ -477,18 +431,18 @@ class Anatomy(BaseAnatomy): @classmethod def get_project_entity_from_cache(cls, project_name): project_cache = cls._project_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data(ayon_api.get_project(project_name)) - return copy.deepcopy(project_cache.data) + return copy.deepcopy(project_cache.get_data()) @classmethod def get_sitesync_addon(cls): - if cls._sitesync_addon_cache.is_outdated: + if not cls._sitesync_addon_cache.is_valid: manager = AddonsManager() cls._sitesync_addon_cache.update_data( manager.get_enabled_addon("sitesync") ) - return cls._sitesync_addon_cache.data + return cls._sitesync_addon_cache.get_data() @classmethod def _get_studio_roots_overrides(cls, project_name): @@ -533,14 +487,14 @@ class Anatomy(BaseAnatomy): elif not site_name: # Use sync server to receive active site name project_cache = cls._default_site_id_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data( sitesync_addon.get_active_site_type(project_name) ) - site_name = project_cache.data + site_name = project_cache.get_data() site_cache = cls._root_overrides_cache[project_name][site_name] - if site_cache.is_outdated: + if not site_cache.is_valid: if site_name == "studio": # Handle studio root overrides without sync server # - studio root overrides can be done even without sync server @@ -553,4 +507,4 @@ class Anatomy(BaseAnatomy): project_name, site_name ) site_cache.update_data(roots_overrides) - return site_cache.data + return site_cache.get_data() diff --git a/client/ayon_core/pipeline/colorspace.py b/client/ayon_core/pipeline/colorspace.py index efa3bbf968..099616ff4a 100644 --- a/client/ayon_core/pipeline/colorspace.py +++ b/client/ayon_core/pipeline/colorspace.py @@ -8,16 +8,20 @@ import tempfile import warnings from copy import deepcopy +import ayon_api + from ayon_core import AYON_CORE_ROOT from ayon_core.settings import get_project_settings from ayon_core.lib import ( + filter_profiles, StringTemplate, run_ayon_launcher_process, - Logger + Logger, ) -from ayon_core.pipeline import Anatomy from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS - +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.template_data import get_template_data +from ayon_core.pipeline.load import get_representation_path_with_anatomy log = Logger.get_logger(__name__) @@ -32,10 +36,6 @@ class CachedData: } -class DeprecatedWarning(DeprecationWarning): - pass - - def deprecated(new_destination): """Mark functions as deprecated. 
@@ -60,13 +60,13 @@ def deprecated(new_destination): @functools.wraps(decorated_func) def wrapper(*args, **kwargs): - warnings.simplefilter("always", DeprecatedWarning) + warnings.simplefilter("always", DeprecationWarning) warnings.warn( ( "Call to deprecated function '{}'" "\nFunction was moved or removed.{}" ).format(decorated_func.__name__, warning_message), - category=DeprecatedWarning, + category=DeprecationWarning, stacklevel=4 ) return decorated_func(*args, **kwargs) @@ -81,28 +81,54 @@ def deprecated(new_destination): def _make_temp_json_file(): """Wrapping function for json temp file """ + temporary_json_filepath = None try: # Store dumped json to temporary file - temporary_json_file = tempfile.NamedTemporaryFile( + with tempfile.NamedTemporaryFile( mode="w", suffix=".json", delete=False - ) - temporary_json_file.close() - temporary_json_filepath = temporary_json_file.name.replace( - "\\", "/" - ) + ) as tmpfile: + temporary_json_filepath = tmpfile.name.replace("\\", "/") yield temporary_json_filepath - except IOError as _error: + except IOError as exc: raise IOError( - "Unable to create temp json file: {}".format( - _error - ) + "Unable to create temp json file: {}".format(exc) ) finally: # Remove the temporary json + if temporary_json_filepath is not None: - os.remove(temporary_json_filepath) + os.remove(temporary_json_filepath) + + +def has_compatible_ocio_package(): + """Current process has available compatible 'PyOpenColorIO'. + + Returns: + bool: True if compatible package is available. + + """ + if CachedData.has_compatible_ocio_package is not None: + return CachedData.has_compatible_ocio_package + + is_compatible = False + try: + import PyOpenColorIO + + # Check if PyOpenColorIO is compatible + # - version 2.0.0 or higher is required + # NOTE version 1 does not have '__version__' attribute + if hasattr(PyOpenColorIO, "__version__"): + version_parts = PyOpenColorIO.__version__.split(".") + major = int(version_parts[0]) + is_compatible = major >= 2 + except ImportError: + pass + + CachedData.has_compatible_ocio_package = is_compatible + return CachedData.has_compatible_ocio_package def get_ocio_config_script_path(): """Returns path to ocio wrapper script Returns: str: path string + """ - return os.path.normpath( - os.path.join( - AYON_CORE_ROOT, - "scripts", - "ocio_wrapper.py" - ) + return os.path.join( + os.path.normpath(AYON_CORE_ROOT), + "scripts", + "ocio_wrapper.py" ) def get_colorspace_name_from_filepath( - filepath, host_name, project_name, - config_data=None, file_rules=None, + filepath, + host_name, + project_name, + config_data, + file_rules=None, project_settings=None, validate=True ): """Get colorspace name from filepath Args: - filepath (str): path string, file rule pattern is tested on it - host_name (str): host name - project_name (str): project name - config_data (Optional[dict]): config path and template in dict. - Defaults to None. - file_rules (Optional[dict]): file rule data from settings. - Defaults to None. - project_settings (Optional[dict]): project settings. Defaults to None. + filepath (str): Path string, file rule pattern is tested on it. + host_name (str): Host name. + project_name (str): Project name. + config_data (dict): Config path and template in dict. + file_rules (Optional[dict]): File rule data from settings. + project_settings (Optional[dict]): Project settings. validate (Optional[bool]): should resulting colorspace be validated - with config file? Defaults to True.
Returns: - str: name of colorspace - """ - project_settings, config_data, file_rules = _get_context_settings( - host_name, project_name, - config_data=config_data, file_rules=file_rules, - project_settings=project_settings - ) + Union[str, None]: name of colorspace + """ if not config_data: # in case global or host color management is not enabled return None + if file_rules is None: + if project_settings is None: + project_settings = get_project_settings(project_name) + file_rules = get_imageio_file_rules( + project_name, host_name, project_settings + ) + # use ImageIO file rules colorspace_name = get_imageio_file_rules_colorspace_from_filepath( - filepath, host_name, project_name, - config_data=config_data, file_rules=file_rules, + filepath, + host_name, + project_name, + config_data=config_data, + file_rules=file_rules, project_settings=project_settings ) @@ -182,47 +213,18 @@ def get_colorspace_name_from_filepath( # validate matching colorspace with config if validate: validate_imageio_colorspace_in_config( - config_data["path"], colorspace_name) + config_data["path"], colorspace_name + ) return colorspace_name -# TODO: remove this in future - backward compatibility -@deprecated("get_imageio_file_rules_colorspace_from_filepath") -def get_imageio_colorspace_from_filepath(*args, **kwargs): - return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) - -# TODO: remove this in future - backward compatibility -@deprecated("get_imageio_file_rules_colorspace_from_filepath") -def get_colorspace_from_filepath(*args, **kwargs): - return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) - - -def _get_context_settings( - host_name, project_name, - config_data=None, file_rules=None, - project_settings=None -): - project_settings = project_settings or get_project_settings( - project_name - ) - - config_data = config_data or get_imageio_config( - project_name, host_name, project_settings) - - # in case host color management is not enabled - if not config_data: - return (None, None, None) - - file_rules = file_rules or get_imageio_file_rules( - project_name, host_name, project_settings) - - return project_settings, config_data, file_rules - - def get_imageio_file_rules_colorspace_from_filepath( - filepath, host_name, project_name, - config_data=None, file_rules=None, + filepath, + host_name, + project_name, + config_data, + file_rules=None, project_settings=None ): """Get colorspace name from filepath @@ -230,28 +232,28 @@ def get_imageio_file_rules_colorspace_from_filepath( ImageIO Settings file rules are tested for matching rule. Args: - filepath (str): path string, file rule pattern is tested on it - host_name (str): host name - project_name (str): project name - config_data (Optional[dict]): config path and template in dict. - Defaults to None. - file_rules (Optional[dict]): file rule data from settings. - Defaults to None. - project_settings (Optional[dict]): project settings. Defaults to None. + filepath (str): Path string, file rule pattern is tested on it. + host_name (str): Host name. + project_name (str): Project name. + config_data (dict): Config path and template in dict. + file_rules (Optional[dict]): File rule data from settings. + project_settings (Optional[dict]): Project settings. Returns: - str: name of colorspace - """ - project_settings, config_data, file_rules = _get_context_settings( - host_name, project_name, - config_data=config_data, file_rules=file_rules, - project_settings=project_settings - ) + Union[str, None]: Name of colorspace. 
+ """ if not config_data: # in case global or host color management is not enabled return None + if file_rules is None: + if project_settings is None: + project_settings = get_project_settings(project_name) + file_rules = get_imageio_file_rules( + project_name, host_name, project_settings + ) + # match file rule from path colorspace_name = None for file_rule in file_rules: @@ -282,26 +284,48 @@ def get_config_file_rules_colorspace_from_filepath(config_path, filepath): Returns: Union[str, None]: matching colorspace name + """ - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess + if has_compatible_ocio_package(): + result_data = _get_config_file_rules_colorspace_from_filepath( + config_path, filepath + ) + else: result_data = _get_wrapped_with_subprocess( - "colorspace", "get_config_file_rules_colorspace_from_filepath", + "get_config_file_rules_colorspace_from_filepath", config_path=config_path, filepath=filepath ) - if result_data: - return result_data[0] - - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_config_file_rules_colorspace_from_filepath # noqa: E501 - - result_data = _get_config_file_rules_colorspace_from_filepath( - config_path, filepath) if result_data: return result_data[0] + return None + + +def get_config_version_data(config_path): + """Return major and minor version info. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: minor and major keys with values + + """ + if config_path not in CachedData.config_version_data: + if has_compatible_ocio_package(): + version_data = _get_config_version_data(config_path) + else: + version_data = _get_wrapped_with_subprocess( + "get_config_version_data", + config_path=config_path + ) + CachedData.config_version_data[config_path] = version_data + + return deepcopy(CachedData.config_version_data[config_path]) def parse_colorspace_from_filepath( @@ -344,10 +368,10 @@ def parse_colorspace_from_filepath( pattern = "|".join( # Allow to match spaces also as underscores because the # integrator replaces spaces with underscores in filenames - re.escape(colorspace) for colorspace in + re.escape(colorspace) # Sort by longest first so the regex matches longer matches # over smaller matches, e.g. matching 'Output - sRGB' over 'sRGB' - sorted(colorspaces, key=len, reverse=True) + for colorspace in sorted(colorspaces, key=len, reverse=True) ) return re.compile(pattern) @@ -395,6 +419,7 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name): Returns: bool: True if exists + """ colorspaces = get_ocio_config_colorspaces(config_path)["colorspaces"] if colorspace_name not in colorspaces: @@ -405,28 +430,10 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name): return True -# TODO: remove this in future - backward compatibility -@deprecated("_get_wrapped_with_subprocess") -def get_data_subprocess(config_path, data_type): - """[Deprecated] Get data via subprocess - - Wrapper for Python 2 hosts. +def _get_wrapped_with_subprocess(command, **kwargs): + """Get data via subprocess. Args: - config_path (str): path leading to config.ocio file - """ - return _get_wrapped_with_subprocess( - "config", data_type, in_path=config_path, - ) - - -def _get_wrapped_with_subprocess(command_group, command, **kwargs): - """Get data via subprocess - - Wrapper for Python 2 hosts. 
- - Args: - command_group (str): command group name command (str): command name **kwargs: command arguments @@ -436,14 +443,15 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs): with _make_temp_json_file() as tmp_json_path: # Prepare subprocess arguments args = [ - "run", get_ocio_config_script_path(), - command_group, command + "run", + get_ocio_config_script_path(), + command ] - for key_, value_ in kwargs.items(): - args.extend(("--{}".format(key_), value_)) + for key, value in kwargs.items(): + args.extend(("--{}".format(key), value)) - args.append("--out_path") + args.append("--output_path") args.append(tmp_json_path) log.info("Executing: {}".format(" ".join(args))) @@ -451,55 +459,23 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs): run_ayon_launcher_process(*args, logger=log) # return all colorspaces - with open(tmp_json_path, "r") as f_: - return json.load(f_) + with open(tmp_json_path, "r") as stream: + return json.load(stream) -# TODO: this should be part of ocio_wrapper.py -def compatibility_check(): - """Making sure PyOpenColorIO is importable""" - if CachedData.has_compatible_ocio_package is not None: - return CachedData.has_compatible_ocio_package - - try: - import PyOpenColorIO # noqa: F401 - CachedData.has_compatible_ocio_package = True - except ImportError: - CachedData.has_compatible_ocio_package = False - - # compatible - return CachedData.has_compatible_ocio_package - - -# TODO: this should be part of ocio_wrapper.py def compatibility_check_config_version(config_path, major=1, minor=None): """Making sure PyOpenColorIO config version is compatible""" - if not CachedData.config_version_data.get(config_path): - if compatibility_check(): - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_version_data - - CachedData.config_version_data[config_path] = \ - _get_version_data(config_path) - - else: - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - CachedData.config_version_data[config_path] = \ - _get_wrapped_with_subprocess( - "config", "get_version", config_path=config_path - ) + version_data = get_config_version_data(config_path) # check major version - if CachedData.config_version_data[config_path]["major"] != major: + if version_data["major"] != major: return False # check minor version - if minor and CachedData.config_version_data[config_path]["minor"] != minor: + if minor is not None and version_data["minor"] != minor: return False - # compatible return True @@ -514,23 +490,19 @@ def get_ocio_config_colorspaces(config_path): Returns: dict: colorspace and family in couple + """ - if not CachedData.ocio_config_colorspaces.get(config_path): - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - CachedData.ocio_config_colorspaces[config_path] = \ - _get_wrapped_with_subprocess( - "config", "get_colorspace", in_path=config_path - ) + if config_path not in CachedData.ocio_config_colorspaces: + if has_compatible_ocio_package(): + config_colorspaces = _get_ocio_config_colorspaces(config_path) else: - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_colorspace_data + config_colorspaces = _get_wrapped_with_subprocess( + "get_ocio_config_colorspaces", + config_path=config_path + ) + CachedData.ocio_config_colorspaces[config_path] = config_colorspaces - 
CachedData.ocio_config_colorspaces[config_path] = \ - _get_colorspace_data(config_path) - - return CachedData.ocio_config_colorspaces[config_path] + return deepcopy(CachedData.ocio_config_colorspaces[config_path]) def convert_colorspace_enumerator_item( @@ -540,11 +512,12 @@ def convert_colorspace_enumerator_item( """Convert colorspace enumerator item to dictionary Args: - colorspace_item (str): colorspace and family in couple - config_items (dict[str,dict]): colorspace data + colorspace_enum_item (str): Colorspace and family in couple. + config_items (dict[str,dict]): Colorspace data. Returns: dict: colorspace data + """ if "::" not in colorspace_enum_item: return None @@ -603,16 +576,18 @@ def get_colorspaces_enumerator_items( Families can be used for building menu and submenus in gui. Args: - config_items (dict[str,dict]): colorspace data coming from - `get_ocio_config_colorspaces` function - include_aliases (bool): include aliases in result - include_looks (bool): include looks in result - include_roles (bool): include roles in result + config_items (dict[str,dict]): Colorspace data coming from + `get_ocio_config_colorspaces` function. + include_aliases (Optional[bool]): Include aliases in result. + include_looks (Optional[bool]): Include looks in result. + include_roles (Optional[bool]): Include roles in result. + include_display_views (Optional[bool]): Include display views + in result. Returns: - list[tuple[str,str]]: colorspace and family in couple + list[tuple[str, str]]: Colorspace and family in couples. + """ - labeled_colorspaces = [] aliases = set() colorspaces = set() looks = set() @@ -622,86 +597,86 @@ def get_colorspaces_enumerator_items( if items_type == "colorspaces": for color_name, color_data in colorspace_items.items(): if color_data.get("aliases"): - aliases.update([ + aliases.update({ ( "aliases::{}".format(alias_name), "[alias] {} ({})".format(alias_name, color_name) ) for alias_name in color_data["aliases"] - ]) + }) colorspaces.add(( "{}::{}".format(items_type, color_name), "[colorspace] {}".format(color_name) )) elif items_type == "looks": - looks.update([ + looks.update({ ( "{}::{}".format(items_type, name), "[look] {} ({})".format(name, role_data["process_space"]) ) for name, role_data in colorspace_items.items() - ]) + }) elif items_type == "displays_views": - display_views.update([ + display_views.update({ ( "{}::{}".format(items_type, name), "[view (display)] {}".format(name) ) for name, _ in colorspace_items.items() - ]) + }) elif items_type == "roles": - roles.update([ + roles.update({ ( "{}::{}".format(items_type, name), "[role] {} ({})".format(name, role_data["colorspace"]) ) for name, role_data in colorspace_items.items() - ]) + }) - if roles and include_roles: - roles = sorted(roles, key=lambda x: x[0]) - labeled_colorspaces.extend(roles) + def _sort_key_getter(item): + """Use colorspace for sorting. - # add colorspaces as second so it is not first in menu - colorspaces = sorted(colorspaces, key=lambda x: x[0]) - labeled_colorspaces.extend(colorspaces) + Args: + item (tuple[str, str]): Item with colorspace and label. - if aliases and include_aliases: - aliases = sorted(aliases, key=lambda x: x[0]) - labeled_colorspaces.extend(aliases) + Returns: + str: Colorspace. 
- if looks and include_looks: - looks = sorted(looks, key=lambda x: x[0]) - labeled_colorspaces.extend(looks) + """ + return item[0] - if display_views and include_display_views: - display_views = sorted(display_views, key=lambda x: x[0]) - labeled_colorspaces.extend(display_views) + labeled_colorspaces = [] + if include_roles: + labeled_colorspaces.extend( + sorted(roles, key=_sort_key_getter) + ) + + # Add colorspaces after roles, so it is not first in menu + labeled_colorspaces.extend( + sorted(colorspaces, key=_sort_key_getter) + ) + + if include_aliases: + labeled_colorspaces.extend( + sorted(aliases, key=_sort_key_getter) + ) + + if include_looks: + labeled_colorspaces.extend( + sorted(looks, key=_sort_key_getter) + ) + + if include_display_views: + labeled_colorspaces.extend( + sorted(display_views, key=_sort_key_getter) + ) return labeled_colorspaces -# TODO: remove this in future - backward compatibility -@deprecated("_get_wrapped_with_subprocess") -def get_colorspace_data_subprocess(config_path): - """[Deprecated] Get colorspace data via subprocess - - Wrapper for Python 2 hosts. - - Args: - config_path (str): path leading to config.ocio file - - Returns: - dict: colorspace and family in couple - """ - return _get_wrapped_with_subprocess( - "config", "get_colorspace", in_path=config_path - ) - - def get_ocio_config_views(config_path): """Get all viewer data @@ -713,212 +688,346 @@ def get_ocio_config_views(config_path): Returns: dict: `display/viewer` and viewer data + """ - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - return _get_wrapped_with_subprocess( - "config", "get_views", in_path=config_path - ) + if has_compatible_ocio_package(): + return _get_ocio_config_views(config_path) - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_views_data - - return _get_views_data(config_path) - - -# TODO: remove this in future - backward compatibility -@deprecated("_get_wrapped_with_subprocess") -def get_views_data_subprocess(config_path): - """[Deprecated] Get viewers data via subprocess - - Wrapper for Python 2 hosts. - - Args: - config_path (str): path leading to config.ocio file - - Returns: - dict: `display/viewer` and viewer data - """ return _get_wrapped_with_subprocess( - "config", "get_views", in_path=config_path + "get_ocio_config_views", + config_path=config_path ) -def get_imageio_config( +def _get_global_config_data( project_name, host_name, - project_settings=None, - anatomy_data=None, + anatomy, + template_data, + imageio_global, + folder_id, + log, +): + """Get global config data. + + Global config from core settings uses profiles based on host name, + task name and task type. The filtered profile can define 3 types of + config sources: + 1. AYON ocio addon configs. + 2. Custom path to ocio config. + 3. Path to 'ocioconfig' representation on product. Name of product can be + defined in settings. Product name can be regex but exact match is + always preferred. + + None is returned when no profile is found or when the resolved config + path does not exist. + + Args: + project_name (str): Project name. + host_name (str): Host name. + anatomy (Anatomy): Project anatomy object. + template_data (dict[str, Any]): Template data. + imageio_global (dict[str, Any]): Core imageio settings. + folder_id (Union[str, None]): Folder id. + log (logging.Logger): Logger object.
+ + Returns: + Union[dict[str, str], None]: Config data with path and template + or None. + + """ + task_name = task_type = None + task_data = template_data.get("task") + if task_data: + task_name = task_data["name"] + task_type = task_data["type"] + + filter_values = { + "task_names": task_name, + "task_types": task_type, + "host_names": host_name, + } + profile = filter_profiles( + imageio_global["ocio_config_profiles"], filter_values + ) + if profile is None: + log.info(f"No config profile matched filters {str(filter_values)}") + return None + + profile_type = profile["type"] + if profile_type in ("builtin_path", "custom_path"): + template = profile[profile_type] + result = StringTemplate.format_strict_template( + template, template_data + ) + normalized_path = str(result.normalized()) + if not os.path.exists(normalized_path): + log.warning(f"Path was not found '{normalized_path}'.") + return None + + return { + "path": normalized_path, + "template": template + } + + # TODO decide if this is the right name for representation + repre_name = "ocioconfig" + + folder_info = template_data.get("folder") + if not folder_info: + log.warning("Folder info is missing.") + return None + folder_path = folder_info["path"] + + product_name = profile["product_name"] + if folder_id is None: + folder_entity = ayon_api.get_folder_by_path( + project_name, folder_path, fields={"id"} + ) + if not folder_entity: + log.warning(f"Folder entity '{folder_path}' was not found.") + return None + folder_id = folder_entity["id"] + + product_entities_by_name = { + product_entity["name"]: product_entity + for product_entity in ayon_api.get_products( + project_name, + folder_ids={folder_id}, + product_name_regex=product_name, + fields={"id", "name"} + ) + } + if not product_entities_by_name: + log.debug( + f"No product entities were found for folder '{folder_path}' with" + f" product name filter '{product_name}'." + ) + return None + + # Try to use exact match first, otherwise use first available product + product_entity = product_entities_by_name.get(product_name) + if product_entity is None: + product_entity = next(iter(product_entities_by_name.values())) + + product_name = product_entity["name"] + # Find last product version + version_entity = ayon_api.get_last_version_by_product_id( + project_name, + product_id=product_entity["id"], + fields={"id"} + ) + if not version_entity: + log.info( + f"Product '{product_name}' does not have any versions available." + ) + return None + + # Find 'ocioconfig' representation entity + repre_entity = ayon_api.get_representation_by_name( + project_name, + representation_name=repre_name, + version_id=version_entity["id"], + ) + if not repre_entity: + log.debug( + f"Representation '{repre_name}'" + f" not found on product '{product_name}'." + ) + return None + + path = get_representation_path_with_anatomy(repre_entity, anatomy) + template = repre_entity["attrib"]["template"] + return { + "path": path, + "template": template, + } + + +def get_imageio_config_preset( + project_name, + folder_path, + task_name, + host_name, anatomy=None, - project_settings=None, - anatomy_data=None, + project_settings=None, + template_data=None, + env=None, + folder_id=None, ): """Returns config data from settings - Config path is formatted in `path` key - and original settings input is saved into `template` key. + Output contains the resolved config path under the 'path' key and its + source template under the 'template' key. + + Template data can be prepared with 'get_template_data'.
Args: - project_name (str): project name - host_name (str): host name + project_name (str): Project name. + folder_path (str): Folder path. + task_name (str): Task name. + host_name (str): Host name. + anatomy (Optional[Anatomy]): Project anatomy object. project_settings (Optional[dict]): Project settings. - anatomy_data (Optional[dict]): anatomy formatting data. - anatomy (Optional[Anatomy]): Anatomy object. - env (Optional[dict]): Environment variables. + template_data (Optional[dict]): Template data used for + template formatting. + env (Optional[dict]): Environment variables, also used for + template formatting. Values from 'os.environ' are used + when not provided. + folder_id (Optional[str]): Folder id. Used only when the config path + is received from a published representation. Autofilled when + not provided. Returns: dict: config path data or empty dict + """ - project_settings = project_settings or get_project_settings(project_name) - anatomy = anatomy or Anatomy(project_name) - - if not anatomy_data: - from ayon_core.pipeline.context_tools import ( - get_current_context_template_data) - anatomy_data = get_current_context_template_data() - - formatting_data = deepcopy(anatomy_data) - - # Add project roots to anatomy data - formatting_data["root"] = anatomy.roots - formatting_data["platform"] = platform.system().lower() + if not project_settings: + project_settings = get_project_settings(project_name) # Get colorspace settings imageio_global, imageio_host = _get_imageio_settings( - project_settings, host_name) + project_settings, host_name + ) + # Global color management must be enabled to be able to use host settings + if not imageio_global["activate_global_color_management"]: + log.info("Colorspace management is disabled globally.") + return {} # Host 'ocio_config' is optional host_ocio_config = imageio_host.get("ocio_config") or {} - - # Global color management must be enabled to be able to use host settings - activate_color_management = imageio_global.get( - "activate_global_color_management") - # TODO: remove this in future - backward compatibility - # For already saved overrides from previous version look for 'enabled' - # on host settings. - if activate_color_management is None: - activate_color_management = host_ocio_config.get("enabled", False) - - if not activate_color_management: - # if global settings are disabled return empty dict because - # it is expected that no colorspace management is needed - log.info("Colorspace management is disabled globally.") - return {} + # TODO remove + # - backward compatibility when host settings had only 'enabled' flag + # the flag was split into 'activate_global_color_management' + # and 'override_global_config' + host_ocio_config_enabled = host_ocio_config.get("enabled", False) # Check if host settings group is having 'activate_host_color_management' # - if it does not have activation key then default it to True so it uses # global settings # This is for backward compatibility.
- # TODO: in future rewrite this to be more explicit activate_host_color_management = imageio_host.get( - "activate_host_color_management") - - # TODO: remove this in future - backward compatibility + "activate_host_color_management" + ) if activate_host_color_management is None: - activate_host_color_management = host_ocio_config.get("enabled", False) + activate_host_color_management = host_ocio_config_enabled if not activate_host_color_management: # if host settings are disabled return False because # it is expected that no colorspace management is needed log.info( - "Colorspace management for host '{}' is disabled.".format( - host_name) + f"Colorspace management for host '{host_name}' is disabled." ) return {} - # get config path from either global or host settings - # depending on override flag + project_entity = None + if anatomy is None: + project_entity = ayon_api.get_project(project_name) + anatomy = Anatomy(project_name, project_entity=project_entity) + + if env is None: + env = dict(os.environ.items()) + + if template_data: + template_data = deepcopy(template_data) + else: + if not project_entity: + project_entity = ayon_api.get_project(project_name) + + folder_entity = task_entity = folder_id = None + if folder_path: + folder_entity = ayon_api.get_folder_by_path( + project_name, folder_path + ) + folder_id = folder_entity["id"] + + if folder_id and task_name: + task_entity = ayon_api.get_task_by_name( + project_name, folder_id, task_name + ) + template_data = get_template_data( + project_entity, + folder_entity, + task_entity, + host_name, + project_settings, + ) + + # Add project roots to anatomy data + template_data["root"] = anatomy.roots + template_data["platform"] = platform.system().lower() + + # Add environment variables to template data + template_data.update(env) + + # Get config path from core or host settings + # - based on override flag in host settings # TODO: in future rewrite this to be more explicit override_global_config = host_ocio_config.get("override_global_config") if override_global_config is None: - # for already saved overrides from previous version - # TODO: remove this in future - backward compatibility - override_global_config = host_ocio_config.get("enabled") + override_global_config = host_ocio_config_enabled - if override_global_config: - config_data = _get_config_data( - host_ocio_config["filepath"], formatting_data, env + if not override_global_config: + config_data = _get_global_config_data( + project_name, + host_name, + anatomy, + template_data, + imageio_global, + folder_id, + log, ) else: - # get config path from global - config_global = imageio_global["ocio_config"] - config_data = _get_config_data( - config_global["filepath"], formatting_data, env + config_data = _get_host_config_data( + host_ocio_config["filepath"], template_data ) if not config_data: raise FileExistsError( - "No OCIO config found in settings. It is " - "either missing or there is typo in path inputs" + "No OCIO config found in settings. It is" + " either missing or there is a typo in path inputs" ) return config_data -def _get_config_data(path_list, anatomy_data, env=None): +def _get_host_config_data(templates, template_data): """Return first existing path in path list. - If template is used in path inputs, - then it is formatted by anatomy data - and environment variables + Use template data to fill possible formatting in paths. Args: - path_list (list[str]): list of abs paths - anatomy_data (dict): formatting data - env (Optional[dict]): Environment variables.
+ templates (list[str]): List of config path templates. + template_data (dict): Template data used to format templates. Returns: - dict: config data + Union[dict, None]: Config data or 'None' if templates are empty + or no formatted path exists. + """ - formatting_data = deepcopy(anatomy_data) - - environment_vars = env or dict(**os.environ) - - # format the path for potential env vars - formatting_data.update(environment_vars) - - # first try host config paths - for path_ in path_list: - formatted_path = _format_path(path_, formatting_data) - - if not os.path.exists(formatted_path): + for template in templates: + formatted_path = StringTemplate.format_template( + template, template_data + ) + if not formatted_path.solved: continue - return { - "path": os.path.normpath(formatted_path), - "template": path_ - } - - -def _format_path(template_path, formatting_data): - """Single template path formatting. - - Args: - template_path (str): template string - formatting_data (dict): data to be used for - template formatting - - Returns: - str: absolute formatted path - """ - # format path for anatomy keys - formatted_path = StringTemplate(template_path).format( - formatting_data) - - return os.path.abspath(formatted_path) + path = os.path.abspath(formatted_path) + if os.path.exists(path): + return { + "path": os.path.normpath(path), + "template": template } def get_imageio_file_rules(project_name, host_name, project_settings=None): """Get ImageIO File rules from project settings Args: - project_name (str): project name - host_name (str): host name - project_settings (dict, optional): project settings. - Defaults to None. + project_name (str): Project name. + host_name (str): Host name. + project_settings (Optional[dict]): Project settings. Returns: list[dict[str, Any]]: file rules data + """ project_settings = project_settings or get_project_settings(project_name) @@ -960,7 +1069,7 @@ def get_remapped_colorspace_to_native( """Return native colorspace name. Args: - ocio_colorspace_name (str | None): ocio colorspace name + ocio_colorspace_name (str | None): OCIO colorspace name. host_name (str): Host name. imageio_host_settings (dict[str, Any]): ImageIO host settings. @@ -968,16 +1077,15 @@ def get_remapped_colorspace_to_native( Union[str, None]: native colorspace name defined in remapping or None """ - CachedData.remapping.setdefault(host_name, {}) - if CachedData.remapping[host_name].get("to_native") is None: + host_mapping = CachedData.remapping.setdefault(host_name, {}) + if "to_native" not in host_mapping: remapping_rules = imageio_host_settings["remapping"]["rules"] - CachedData.remapping[host_name]["to_native"] = { + host_mapping["to_native"] = { rule["ocio_name"]: rule["host_native_name"] for rule in remapping_rules } - return CachedData.remapping[host_name]["to_native"].get( - ocio_colorspace_name) + return host_mapping["to_native"].get(ocio_colorspace_name) def get_remapped_colorspace_from_native( @@ -992,30 +1100,29 @@ def get_remapped_colorspace_from_native( Returns: Union[str, None]: Ocio colorspace name defined in remapping or None.
- """ - CachedData.remapping.setdefault(host_name, {}) - if CachedData.remapping[host_name].get("from_native") is None: + """ + host_mapping = CachedData.remapping.setdefault(host_name, {}) + if "from_native" not in host_mapping: remapping_rules = imageio_host_settings["remapping"]["rules"] - CachedData.remapping[host_name]["from_native"] = { + host_mapping["from_native"] = { rule["host_native_name"]: rule["ocio_name"] for rule in remapping_rules } - return CachedData.remapping[host_name]["from_native"].get( - host_native_colorspace_name) + return host_mapping["from_native"].get(host_native_colorspace_name) def _get_imageio_settings(project_settings, host_name): """Get ImageIO settings for global and host Args: - project_settings (dict): project settings. - Defaults to None. - host_name (str): host name + project_settings (dict[str, Any]): Project settings. + host_name (str): Host name. Returns: - tuple[dict, dict]: image io settings for global and host + tuple[dict, dict]: Image io settings for global and host. + """ # get image io from global and host_name imageio_global = project_settings["core"]["imageio"] @@ -1033,27 +1140,41 @@ def get_colorspace_settings_from_publish_context(context_data): Returns: tuple | bool: config, file rules or None + """ if "imageioSettings" in context_data and context_data["imageioSettings"]: return context_data["imageioSettings"] project_name = context_data["projectName"] + folder_path = context_data["folderPath"] + task_name = context_data["task"] host_name = context_data["hostName"] - anatomy_data = context_data["anatomyData"] - project_settings_ = context_data["project_settings"] + anatomy = context_data["anatomy"] + template_data = context_data["anatomyData"] + project_settings = context_data["project_settings"] + folder_id = None + folder_entity = context_data.get("folderEntity") + if folder_entity: + folder_id = folder_entity["id"] - config_data = get_imageio_config( - project_name, host_name, - project_settings=project_settings_, - anatomy_data=anatomy_data + config_data = get_imageio_config_preset( + project_name, + folder_path, + task_name, + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=template_data, + folder_id=folder_id, ) # caching invalid state, so it's not recalculated all the time file_rules = None if config_data: file_rules = get_imageio_file_rules( - project_name, host_name, - project_settings=project_settings_ + project_name, + host_name, + project_settings=project_settings ) # caching settings for future instance processing @@ -1063,18 +1184,13 @@ def get_colorspace_settings_from_publish_context(context_data): def set_colorspace_data_to_representation( - representation, context_data, + representation, + context_data, colorspace=None, log=None ): """Sets colorspace data to representation. - Args: - representation (dict): publishing representation - context_data (publish.Context.data): publishing context data - colorspace (str, optional): colorspace name. Defaults to None. - log (logging.Logger, optional): logger instance. Defaults to None. - Example: ``` { @@ -1089,6 +1205,12 @@ def set_colorspace_data_to_representation( } ``` + Args: + representation (dict): publishing representation + context_data (publish.Context.data): publishing context data + colorspace (Optional[str]): Colorspace name. + log (Optional[logging.Logger]): logger instance. 
+ """ log = log or Logger.get_logger(__name__) @@ -1122,12 +1244,15 @@ def set_colorspace_data_to_representation( filename = filename[0] # get matching colorspace from rules - colorspace = colorspace or get_imageio_colorspace_from_filepath( - filename, host_name, project_name, - config_data=config_data, - file_rules=file_rules, - project_settings=project_settings - ) + if colorspace is None: + colorspace = get_imageio_file_rules_colorspace_from_filepath( + filename, + host_name, + project_name, + config_data=config_data, + file_rules=file_rules, + project_settings=project_settings + ) # infuse data to representation if colorspace: @@ -1149,47 +1274,330 @@ def get_display_view_colorspace_name(config_path, display, view): view (str): view name e.g. "sRGB" Returns: - view color space name (str) e.g. "Output - sRGB" + str: View color space name. e.g. "Output - sRGB" + """ - - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - return get_display_view_colorspace_subprocess(config_path, - display, view) - - from ayon_core.scripts.ocio_wrapper import _get_display_view_colorspace_name # noqa - - return _get_display_view_colorspace_name(config_path, display, view) + if has_compatible_ocio_package(): + return _get_display_view_colorspace_name( + config_path, display, view + ) + return _get_wrapped_with_subprocess( + "get_display_view_colorspace_name", + config_path=config_path, + display=display, + view=view + ) -def get_display_view_colorspace_subprocess(config_path, display, view): - """Returns the colorspace attribute of the (display, view) pair - via subprocess. +# --- Implementation of logic using 'PyOpenColorIO' --- +def _get_ocio_config(config_path): + """Helper function to create OCIO config object. + + Args: + config_path (str): Path to config. + + Returns: + PyOpenColorIO.Config: OCIO config for the confing path. + + """ + import PyOpenColorIO + + config_path = os.path.abspath(config_path) + + if not os.path.isfile(config_path): + raise IOError("Input path should be `config.ocio` file") + + return PyOpenColorIO.Config.CreateFromFile(config_path) + + +def _get_config_file_rules_colorspace_from_filepath(config_path, filepath): + """Return found colorspace data found in v2 file rules. + + Args: + config_path (str): path string leading to config.ocio + filepath (str): path string leading to v2 file rules + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available colorspaces + + """ + config = _get_ocio_config(config_path) + + # TODO: use `parseColorSpaceFromString` instead if ocio v1 + return config.getColorSpaceFromFilepath(str(filepath)) + + +def _get_config_version_data(config_path): + """Return major and minor version info. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: minor and major keys with values + + """ + config = _get_ocio_config(config_path) + + return { + "major": config.getMajorVersion(), + "minor": config.getMinorVersion() + } + + +def _get_display_view_colorspace_name(config_path, display, view): + """Returns the colorspace attribute of the (display, view) pair. Args: config_path (str): path string leading to config.ocio display (str): display name e.g. "ACES" view (str): view name e.g. "sRGB" + Raises: + IOError: Input config does not exist. + Returns: - view color space name (str) e.g. "Output - sRGB" + str: view color space name e.g. 
"Output - sRGB" + + """ + config = _get_ocio_config(config_path) + return config.getDisplayViewColorSpaceName(display, view) + + +def _get_ocio_config_colorspaces(config_path): + """Return all found colorspace data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available colorspaces + + """ + config = _get_ocio_config(config_path) + + colorspace_data = { + "roles": {}, + "colorspaces": { + color.getName(): { + "family": color.getFamily(), + "categories": list(color.getCategories()), + "aliases": list(color.getAliases()), + "equalitygroup": color.getEqualityGroup(), + } + for color in config.getColorSpaces() + }, + "displays_views": { + f"{view} ({display})": { + "display": display, + "view": view + + } + for display in config.getDisplays() + for view in config.getViews(display) + }, + "looks": {} + } + + # add looks + looks = config.getLooks() + if looks: + colorspace_data["looks"] = { + look.getName(): {"process_space": look.getProcessSpace()} + for look in looks + } + + # add roles + roles = config.getRoles() + if roles: + colorspace_data["roles"] = { + role: {"colorspace": colorspace} + for (role, colorspace) in roles + } + + return colorspace_data + + +def _get_ocio_config_views(config_path): + """Return all found viewer data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available viewers + + """ + config = _get_ocio_config(config_path) + + output = {} + for display in config.getDisplays(): + for view in config.getViews(display): + colorspace = config.getDisplayViewColorSpaceName(display, view) + # Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa + if colorspace == "": + colorspace = display + + output[f"{display}/{view}"] = { + "display": display, + "view": view, + "colorspace": colorspace + } + + return output + + +# --- Current context functions --- +def get_current_context_imageio_config_preset( + anatomy=None, + project_settings=None, + template_data=None, + env=None, +): + """Get ImageIO config preset for current context. + + Args: + anatomy (Optional[Anatomy]): Current project anatomy. + project_settings (Optional[dict[str, Any]]): Current project settings. + template_data (Optional[dict[str, Any]]): Prepared template data + for current context. + env (Optional[dict[str, str]]): Custom environment variable values. + + Returns: + dict: ImageIO config preset. + + """ + from .context_tools import get_current_context, get_current_host_name + + context = get_current_context() + host_name = get_current_host_name() + return get_imageio_config_preset( + context["project_name"], + context["folder_path"], + context["task_name"], + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=template_data, + env=env, + ) + + +# --- Deprecated functions --- +@deprecated("has_compatible_ocio_package") +def compatibility_check(): + """Making sure PyOpenColorIO is importable + + Deprecated: + Deprecated since '0.3.2'. Use `has_compatible_ocio_package` instead. 
""" - with _make_temp_json_file() as tmp_json_path: - # Prepare subprocess arguments - args = [ - "run", get_ocio_config_script_path(), - "config", "get_display_view_colorspace_name", - "--in_path", config_path, - "--out_path", tmp_json_path, - "--display", display, - "--view", view - ] - log.debug("Executing: {}".format(" ".join(args))) + return has_compatible_ocio_package() - run_ayon_launcher_process(*args, logger=log) - # return default view colorspace name - with open(tmp_json_path, "r") as f: - return json.load(f) +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_imageio_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + + +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + + +@deprecated("_get_wrapped_with_subprocess") +def get_colorspace_data_subprocess(config_path): + """[Deprecated] Get colorspace data via subprocess + + Deprecated: + Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + return _get_wrapped_with_subprocess( + "get_ocio_config_colorspaces", + config_path=config_path + ) + + +@deprecated("_get_wrapped_with_subprocess") +def get_views_data_subprocess(config_path): + """[Deprecated] Get viewers data via subprocess + + Deprecated: + Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + + """ + return _get_wrapped_with_subprocess( + "get_ocio_config_views", + config_path=config_path + ) + + +@deprecated("get_imageio_config_preset") +def get_imageio_config( + project_name, + host_name, + project_settings=None, + anatomy_data=None, + anatomy=None, + env=None +): + """Returns config data from settings + + Config path is formatted in `path` key + and original settings input is saved into `template` key. + + Deprecated: + Deprecated since '0.3.1' . Use `get_imageio_config_preset` instead. + + Args: + project_name (str): project name + host_name (str): host name + project_settings (Optional[dict]): Project settings. + anatomy_data (Optional[dict]): anatomy formatting data. + anatomy (Optional[Anatomy]): Anatomy object. + env (Optional[dict]): Environment variables. + + Returns: + dict: config path data or empty dict + + """ + if not anatomy_data: + from .context_tools import get_current_context_template_data + anatomy_data = get_current_context_template_data() + + task_name = anatomy_data.get("task", {}).get("name") + folder_path = anatomy_data.get("folder", {}).get("path") + return get_imageio_config_preset( + project_name, + folder_path, + task_name, + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=anatomy_data, + env=env, + ) diff --git a/client/ayon_core/pipeline/context_tools.py b/client/ayon_core/pipeline/context_tools.py index 33567d7280..c32d04c44c 100644 --- a/client/ayon_core/pipeline/context_tools.py +++ b/client/ayon_core/pipeline/context_tools.py @@ -459,36 +459,6 @@ def is_representation_from_latest(representation): ) -def get_template_data_from_session(session=None, settings=None): - """Template data for template fill from session keys. - - Args: - session (Union[Dict[str, str], None]): The Session to use. 
If not - provided use the currently active global Session. - settings (Optional[Dict[str, Any]]): Prepared studio or project - settings. - - Returns: - Dict[str, Any]: All available data from session. - """ - - if session is not None: - project_name = session["AYON_PROJECT_NAME"] - folder_path = session["AYON_FOLDER_PATH"] - task_name = session["AYON_TASK_NAME"] - host_name = session["AYON_HOST_NAME"] - else: - context = get_current_context() - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - host_name = get_current_host_name() - - return get_template_data_with_names( - project_name, folder_path, task_name, host_name, settings - ) - - def get_current_context_template_data(settings=None): """Prepare template data for current context. diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py index b8618738fb..7615ce6aee 100644 --- a/client/ayon_core/pipeline/create/context.py +++ b/client/ayon_core/pipeline/create/context.py @@ -1987,12 +1987,12 @@ class CreateContext: "Folder '{}' was not found".format(folder_path) ) - task_name = None if task_entity is None: - task_name = self.get_current_task_name() - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) + current_task_name = self.get_current_task_name() + if current_task_name: + task_entity = ayon_api.get_task_by_name( + project_name, folder_entity["id"], current_task_name + ) if pre_create_data is None: pre_create_data = {} @@ -2018,7 +2018,7 @@ class CreateContext: instance_data = { "folderPath": folder_entity["path"], - "task": task_name, + "task": task_entity["name"] if task_entity else None, "productType": creator.product_type, "variant": variant } @@ -2053,7 +2053,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 add_traceback = True exc_info = sys.exc_info() self.log.warning( @@ -2163,7 +2163,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2197,7 +2197,7 @@ class CreateContext: try: convertor.find_instances() - except: + except: # noqa: E722 failed_info.append( prepare_failed_convertor_operation_info( convertor.identifier, sys.exc_info() @@ -2373,7 +2373,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2440,7 +2440,7 @@ class CreateContext: error_message.format(identifier, exc_info[1]) ) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2546,7 +2546,7 @@ class CreateContext: try: self.run_convertor(convertor_identifier) - except: + except: # noqa: E722 failed_info.append( prepare_failed_convertor_operation_info( convertor_identifier, sys.exc_info() diff --git a/client/ayon_core/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py index eb6f8569d9..72deee185e 100644 --- a/client/ayon_core/pipeline/farm/pyblish_functions.py +++ b/client/ayon_core/pipeline/farm/pyblish_functions.py @@ -225,6 +225,7 @@ def create_skeleton_instance( instance_skeleton_data = { "productType": product_type, "productName": data["productName"], + "task": data["task"], "families": families, "folderPath": 
data["folderPath"], "frameStart": time_data.start, diff --git a/client/ayon_core/pipeline/publish/abstract_collect_render.py b/client/ayon_core/pipeline/publish/abstract_collect_render.py index c50dc16380..17cab876b6 100644 --- a/client/ayon_core/pipeline/publish/abstract_collect_render.py +++ b/client/ayon_core/pipeline/publish/abstract_collect_render.py @@ -80,6 +80,7 @@ class RenderInstance(object): anatomyData = attr.ib(default=None) outputDir = attr.ib(default=None) context = attr.ib(default=None) + deadline = attr.ib(default=None) # The source instance the data of this render instance should merge into source_instance = attr.ib(default=None, type=pyblish.api.Instance) @@ -215,13 +216,12 @@ class AbstractCollectRender(pyblish.api.ContextPlugin): # add additional data data = self.add_additional_data(data) - render_instance_dict = attr.asdict(render_instance) - # Merge into source instance if provided, otherwise create instance - instance = render_instance_dict.pop("source_instance", None) + instance = render_instance.source_instance if instance is None: instance = context.create_instance(render_instance.name) + render_instance_dict = attr.asdict(render_instance) instance.data.update(render_instance_dict) instance.data.update(data) diff --git a/client/ayon_core/pipeline/template_data.py b/client/ayon_core/pipeline/template_data.py index 526c7d35c5..d5f06d6a59 100644 --- a/client/ayon_core/pipeline/template_data.py +++ b/client/ayon_core/pipeline/template_data.py @@ -73,8 +73,8 @@ def get_folder_template_data(folder_entity, project_name): - 'parent' - direct parent name, project name used if is under project - Required document fields: - Folder: 'path' -> Plan to require: 'folderType' + Required entity fields: + Folder: 'path', 'folderType' Args: folder_entity (Dict[str, Any]): Folder entity. @@ -101,6 +101,8 @@ def get_folder_template_data(folder_entity, project_name): return { "folder": { "name": folder_name, + "type": folder_entity["folderType"], + "path": path, }, "asset": folder_name, "hierarchy": hierarchy, diff --git a/client/ayon_core/pipeline/thumbnails.py b/client/ayon_core/pipeline/thumbnails.py new file mode 100644 index 0000000000..dbb38615d8 --- /dev/null +++ b/client/ayon_core/pipeline/thumbnails.py @@ -0,0 +1,263 @@ +import os +import time +import collections + +import ayon_api + +from ayon_core.lib.local_settings import get_ayon_appdirs + + +FileInfo = collections.namedtuple( + "FileInfo", + ("path", "size", "modification_time") +) + + +class ThumbnailsCache: + """Cache of thumbnails on local storage. + + Thumbnails are cached to appdirs to predefined directory. Each project has + own subfolder with thumbnails -> that's because each project has own + thumbnail id validation and file names are thumbnail ids with matching + extension. Extensions are predefined (.png and .jpeg). + + Cache has cleanup mechanism which is triggered on initialized by default. + + The cleanup has 2 levels: + 1. soft cleanup which remove all files that are older then 'days_alive' + 2. max size cleanup which remove all files until the thumbnails folder + contains less then 'max_filesize' + - this is time consuming so it's not triggered automatically + + Args: + cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails). 
+    """

+    # Lifetime of thumbnails (in days)
+    # - default 3 days
+    days_alive = 3
+    # Max size of thumbnail directory (in bytes)
+    # - default 2 GB
+    max_filesize = 2 * 1024 * 1024 * 1024
+
+    def __init__(self, cleanup=True):
+        self._thumbnails_dir = None
+        self._days_alive_secs = self.days_alive * 24 * 60 * 60
+        if cleanup:
+            self.cleanup()
+
+    def get_thumbnails_dir(self):
+        """Root directory where thumbnails are stored.
+
+        Returns:
+            str: Path to thumbnails root.
+        """
+
+        if self._thumbnails_dir is None:
+            self._thumbnails_dir = get_ayon_appdirs("thumbnails")
+        return self._thumbnails_dir
+
+    thumbnails_dir = property(get_thumbnails_dir)
+
+    def get_thumbnails_dir_file_info(self):
+        """Get information about all files in thumbnails directory.
+
+        Returns:
+            List[FileInfo]: List of file information about all files.
+        """
+
+        thumbnails_dir = self.thumbnails_dir
+        files_info = []
+        if not os.path.exists(thumbnails_dir):
+            return files_info
+
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                files_info.append(FileInfo(
+                    path, os.path.getsize(path), os.path.getmtime(path)
+                ))
+        return files_info
+
+    def get_thumbnails_dir_size(self, files_info=None):
+        """Get full size of thumbnail directory.
+
+        Args:
+            files_info (List[FileInfo]): Prepared file information about
+                files in thumbnail directory.
+
+        Returns:
+            int: File size of all files in thumbnail directory.
+        """
+
+        if files_info is None:
+            files_info = self.get_thumbnails_dir_file_info()
+
+        if not files_info:
+            return 0
+
+        return sum(
+            file_info.size
+            for file_info in files_info
+        )
+
+    def cleanup(self, check_max_size=False):
+        """Clean up thumbnails directory.
+
+        Args:
+            check_max_size (bool): Also clean up files to match the max size
+                of the thumbnails directory.
+        """
+
+        thumbnails_dir = self.get_thumbnails_dir()
+        # Skip if thumbnails dir does not exist yet
+        if not os.path.exists(thumbnails_dir):
+            return
+
+        self._soft_cleanup(thumbnails_dir)
+        if check_max_size:
+            self._max_size_cleanup(thumbnails_dir)
+
+    def _soft_cleanup(self, thumbnails_dir):
+        current_time = time.time()
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                modification_time = os.path.getmtime(path)
+                if current_time - modification_time > self._days_alive_secs:
+                    os.remove(path)
+
+    def _max_size_cleanup(self, thumbnails_dir):
+        files_info = self.get_thumbnails_dir_file_info()
+        size = self.get_thumbnails_dir_size(files_info)
+        if size < self.max_filesize:
+            return
+
+        sorted_file_info = collections.deque(
+            sorted(files_info, key=lambda item: item.modification_time)
+        )
+        diff = size - self.max_filesize
+        while diff > 0:
+            if not sorted_file_info:
+                break
+
+            file_info = sorted_file_info.popleft()
+            diff -= file_info.size
+            os.remove(file_info.path)
+
+    def get_thumbnail_filepath(self, project_name, thumbnail_id):
+        """Get path to cached thumbnail by thumbnail id.
+
+        Args:
+            project_name (str): Name of project.
+            thumbnail_id (str): Thumbnail id.
+
+        Returns:
+            Union[str, None]: Path to thumbnail image or None if thumbnail
+                is not cached yet.
+        """
+
+        if not thumbnail_id:
+            return None
+
+        for ext in (
+            ".png",
+            ".jpeg",
+        ):
+            filepath = os.path.join(
+                self.thumbnails_dir, project_name, thumbnail_id + ext
+            )
+            if os.path.exists(filepath):
+                return filepath
+        return None
+
+    def get_project_dir(self, project_name):
+        """Path to root directory for specific project.
+
+        Args:
+            project_name (str): Name of project for which root directory path
+                should be returned.
+
+        Returns:
+            str: Path to root of project's thumbnails.
+        """
+
+        return os.path.join(self.thumbnails_dir, project_name)
+
+    def make_sure_project_dir_exists(self, project_name):
+        project_dir = self.get_project_dir(project_name)
+        if not os.path.exists(project_dir):
+            os.makedirs(project_dir)
+        return project_dir
+
+    def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
+        """Store thumbnail to cache folder.
+
+        Args:
+            project_name (str): Project where the thumbnail belongs to.
+            thumbnail_id (str): Thumbnail id.
+            content (bytes): Byte content of thumbnail file.
+            mime_type (str): Type of content.
+
+        Returns:
+            str: Path to cached thumbnail image file.
+        """
+
+        if mime_type == "image/png":
+            ext = ".png"
+        elif mime_type == "image/jpeg":
+            ext = ".jpeg"
+        else:
+            raise ValueError(
+                "Unknown mime type for thumbnail \"{}\"".format(mime_type))
+
+        project_dir = self.make_sure_project_dir_exists(project_name)
+        thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
+        with open(thumbnail_path, "wb") as stream:
+            stream.write(content)
+
+        current_time = time.time()
+        os.utime(thumbnail_path, (current_time, current_time))
+
+        return thumbnail_path
+
+
+class _CacheItems:
+    thumbnails_cache = ThumbnailsCache()
+
+
+def get_thumbnail_path(project_name, thumbnail_id):
+    """Get path to thumbnail image.
+
+    Args:
+        project_name (str): Project where thumbnail belongs to.
+        thumbnail_id (Union[str, None]): Thumbnail id.
+
+    Returns:
+        Union[str, None]: Path to thumbnail image or None if the thumbnail
+            id is not valid or the thumbnail could not be retrieved.
+
+    """
+    if not thumbnail_id:
+        return None
+
+    filepath = _CacheItems.thumbnails_cache.get_thumbnail_filepath(
+        project_name, thumbnail_id
+    )
+    if filepath is not None:
+        return filepath
+
+    # 'ayon_api' had a bug: the public function 'get_thumbnail_by_id'
+    # did not return the output of the 'ServerAPI' method.
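+    # Workaround: use the 'ServerAPI' connection object directly.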
+ con = ayon_api.get_server_api_connection() + result = con.get_thumbnail_by_id(project_name, thumbnail_id) + + if result is not None and result.is_valid: + return _CacheItems.thumbnails_cache.store_thumbnail( + project_name, + thumbnail_id, + result.content, + result.content_type + ) + return None diff --git a/client/ayon_core/pipeline/workfile/__init__.py b/client/ayon_core/pipeline/workfile/__init__.py index 36766e3a04..05f939024c 100644 --- a/client/ayon_core/pipeline/workfile/__init__.py +++ b/client/ayon_core/pipeline/workfile/__init__.py @@ -21,6 +21,15 @@ from .utils import ( from .build_workfile import BuildWorkfile +from .workfile_template_builder import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + + __all__ = ( "get_workfile_template_key_from_context", "get_workfile_template_key", @@ -39,4 +48,10 @@ __all__ = ( "should_open_workfiles_tool_on_launch", "BuildWorkfile", + + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", ) diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py index 5e63ba444a..bb94d87483 100644 --- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py +++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py @@ -36,6 +36,7 @@ from ayon_core.lib import ( filter_profiles, attribute_definitions, ) +from ayon_core.lib.events import EventSystem, EventCallback, Event from ayon_core.lib.attribute_definitions import get_attributes_keys from ayon_core.pipeline import Anatomy from ayon_core.pipeline.load import ( @@ -43,6 +44,13 @@ from ayon_core.pipeline.load import ( get_representation_contexts, load_with_repre_context, ) +from ayon_core.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from ayon_core.pipeline.create import ( discover_legacy_creator_plugins, @@ -124,6 +132,8 @@ class AbstractTemplateBuilder(object): self._current_task_entity = _NOT_SET self._linked_folder_entities = _NOT_SET + self._event_system = EventSystem() + @property def project_name(self): if isinstance(self._host, HostBase): @@ -211,10 +221,14 @@ class AbstractTemplateBuilder(object): Returns: List[PlaceholderPlugin]: Plugin classes available for host. """ + plugins = [] + # Backwards compatibility if hasattr(self._host, "get_workfile_build_placeholder_plugins"): return self._host.get_workfile_build_placeholder_plugins() - return [] + + plugins.extend(discover(PlaceholderPlugin)) + return plugins @property def host(self): @@ -257,6 +271,8 @@ class AbstractTemplateBuilder(object): self._project_settings = None + self._event_system = EventSystem() + self.clear_shared_data() self.clear_shared_populate_data() @@ -329,7 +345,7 @@ class AbstractTemplateBuilder(object): is good practice to check if the same value is not already stored under different key or if the key is not already used for something else. - Key should be self explanatory to content. + Key should be self-explanatory to content. 
- wrong: 'folder' - good: 'folder_name' @@ -375,7 +391,7 @@ class AbstractTemplateBuilder(object): is good practice to check if the same value is not already stored under different key or if the key is not already used for something else. - Key should be self explanatory to content. + Key should be self-explanatory to content. - wrong: 'folder' - good: 'folder_path' @@ -395,7 +411,7 @@ class AbstractTemplateBuilder(object): is good practice to check if the same value is not already stored under different key or if the key is not already used for something else. - Key should be self explanatory to content. + Key should be self-explanatory to content. - wrong: 'folder' - good: 'folder_path' @@ -466,7 +482,7 @@ class AbstractTemplateBuilder(object): return list(sorted( placeholders, - key=lambda i: i.order + key=lambda placeholder: placeholder.order )) def build_template( @@ -498,15 +514,21 @@ class AbstractTemplateBuilder(object): process if version is created """ - template_preset = self.get_template_preset() - - if template_path is None: - template_path = template_preset["path"] - - if keep_placeholders is None: - keep_placeholders = template_preset["keep_placeholder"] - if create_first_version is None: - create_first_version = template_preset["create_first_version"] + if any( + value is None + for value in [ + template_path, + keep_placeholders, + create_first_version, + ] + ): + template_preset = self.get_template_preset() + if template_path is None: + template_path = template_preset["path"] + if keep_placeholders is None: + keep_placeholders = template_preset["keep_placeholder"] + if create_first_version is None: + create_first_version = template_preset["create_first_version"] # check if first version is created created_version_workfile = False @@ -685,7 +707,7 @@ class AbstractTemplateBuilder(object): for placeholder in placeholders } all_processed = len(placeholders) == 0 - # Counter is checked at the ned of a loop so the loop happens at least + # Counter is checked at the end of a loop so the loop happens at least # once. iter_counter = 0 while not all_processed: @@ -729,6 +751,16 @@ class AbstractTemplateBuilder(object): placeholder.set_finished() + # Trigger on_depth_processed event + self.emit_event( + topic="template.depth_processed", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id + }, + source="builder" + ) + # Clear shared data before getting new placeholders self.clear_shared_populate_data() @@ -747,6 +779,16 @@ class AbstractTemplateBuilder(object): placeholder_by_scene_id[identifier] = placeholder placeholders.append(placeholder) + # Trigger on_finished event + self.emit_event( + topic="template.finished", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id, + }, + source="builder" + ) + self.refresh() def _get_build_profiles(self): @@ -772,12 +814,14 @@ class AbstractTemplateBuilder(object): - 'project_settings/{host name}/templated_workfile_build/profiles' Returns: - str: Path to a template file with placeholders. + dict: Dictionary with `path`, `keep_placeholder` and + `create_first_version` settings from the template preset + for current context. Raises: TemplateProfileNotFound: When profiles are not filled. TemplateLoadFailed: Profile was found but path is not set. - TemplateNotFound: Path was set but file does not exists. + TemplateNotFound: Path was set but file does not exist. 
""" host_name = self.host_name @@ -872,6 +916,30 @@ class AbstractTemplateBuilder(object): "create_first_version": create_first_version } + def emit_event(self, topic, data=None, source=None) -> Event: + return self._event_system.emit(topic, data, source) + + def add_event_callback(self, topic, callback, order=None): + return self._event_system.add_callback(topic, callback, order=order) + + def add_on_finished_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.finished", + callback=callback, + order=order + ) + + def add_on_depth_processed_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.depth_processed", + callback=callback, + order=order + ) + @six.add_metaclass(ABCMeta) class PlaceholderPlugin(object): @@ -1045,7 +1113,7 @@ class PlaceholderPlugin(object): Using shared data from builder but stored under plugin identifier. - Key should be self explanatory to content. + Key should be self-explanatory to content. - wrong: 'folder' - good: 'folder_path' @@ -1085,7 +1153,7 @@ class PlaceholderPlugin(object): Using shared data from builder but stored under plugin identifier. - Key should be self explanatory to content. + Key should be self-explanatory to content. - wrong: 'folder' - good: 'folder_path' @@ -1107,10 +1175,10 @@ class PlaceholderItem(object): """Item representing single item in scene that is a placeholder to process. Items are always created and updated by their plugins. Each plugin can use - modified class of 'PlacehoderItem' but only to add more options instead of + modified class of 'PlaceholderItem' but only to add more options instead of new other. - Scene identifier is used to avoid processing of the palceholder item + Scene identifier is used to avoid processing of the placeholder item multiple times so must be unique across whole workfile builder. Args: @@ -1162,7 +1230,7 @@ class PlaceholderItem(object): """Placeholder data which can modify how placeholder is processed. Possible general keys - - order: Can define the order in which is palceholder processed. + - order: Can define the order in which is placeholder processed. Lower == earlier. Other keys are defined by placeholder and should validate them on item @@ -1264,11 +1332,9 @@ class PlaceholderLoadMixin(object): """Unified attribute definitions for load placeholder. Common function for placeholder plugins used for loading of - repsentations. Use it in 'get_placeholder_options'. + representations. Use it in 'get_placeholder_options'. Args: - plugin (PlaceholderPlugin): Plugin used for loading of - representations. options (Dict[str, Any]): Already available options which are used as defaults for attributes. 
@@ -1468,7 +1534,9 @@ class PlaceholderLoadMixin(object): product_name_regex = None if product_name_regex_value: product_name_regex = re.compile(product_name_regex_value) - product_type = placeholder.data["family"] + product_type = placeholder.data.get("product_type") + if product_type is None: + product_type = placeholder.data["family"] builder_type = placeholder.data["builder_type"] folder_ids = [] @@ -1529,35 +1597,22 @@ class PlaceholderLoadMixin(object): pass - def _reduce_last_version_repre_entities(self, representations): - """Reduce representations to last verison.""" + def _reduce_last_version_repre_entities(self, repre_contexts): + """Reduce representations to last version.""" - mapping = {} - # TODO use representation context with entities - # - using 'folder', 'subset' and 'version' from context on - # representation is danger - for repre_entity in representations: - repre_context = repre_entity["context"] - - folder_name = repre_context["asset"] - product_name = repre_context["subset"] - version = repre_context.get("version", -1) - - if folder_name not in mapping: - mapping[folder_name] = {} - - product_mapping = mapping[folder_name] - if product_name not in product_mapping: - product_mapping[product_name] = collections.defaultdict(list) - - version_mapping = product_mapping[product_name] - version_mapping[version].append(repre_entity) + version_mapping_by_product_id = {} + for repre_context in repre_contexts: + product_id = repre_context["product"]["id"] + version = repre_context["version"]["version"] + version_mapping = version_mapping_by_product_id.setdefault( + product_id, {} + ) + version_mapping.setdefault(version, []).append(repre_context) output = [] - for product_mapping in mapping.values(): - for version_mapping in product_mapping.values(): - last_version = tuple(sorted(version_mapping.keys()))[-1] - output.extend(version_mapping[last_version]) + for version_mapping in version_mapping_by_product_id.values(): + last_version = max(version_mapping.keys()) + output.extend(version_mapping[last_version]) return output def populate_load_placeholder(self, placeholder, ignore_repre_ids=None): @@ -1585,32 +1640,33 @@ class PlaceholderLoadMixin(object): loader_name = placeholder.data["loader"] loader_args = self.parse_loader_args(placeholder.data["loader_args"]) - placeholder_representations = self._get_representations(placeholder) + placeholder_representations = [ + repre_entity + for repre_entity in self._get_representations(placeholder) + if repre_entity["id"] not in ignore_repre_ids + ] - filtered_representations = [] - for representation in self._reduce_last_version_repre_entities( - placeholder_representations - ): - repre_id = representation["id"] - if repre_id not in ignore_repre_ids: - filtered_representations.append(representation) - - if not filtered_representations: + repre_load_contexts = get_representation_contexts( + self.project_name, placeholder_representations + ) + filtered_repre_contexts = self._reduce_last_version_repre_entities( + repre_load_contexts.values() + ) + if not filtered_repre_contexts: self.log.info(( "There's no representation for this placeholder: {}" ).format(placeholder.scene_identifier)) + if not placeholder.data.get("keep_placeholder", True): + self.delete_placeholder(placeholder) return - repre_load_contexts = get_representation_contexts( - self.project_name, filtered_representations - ) loaders_by_name = self.builder.get_loaders_by_name() self._before_placeholder_load( placeholder ) failed = False - for repre_load_context in 
repre_load_contexts.values(): + for repre_load_context in filtered_repre_contexts: folder_path = repre_load_context["folder"]["path"] product_name = repre_load_context["product"]["name"] representation = repre_load_context["representation"] @@ -1695,8 +1751,6 @@ class PlaceholderCreateMixin(object): publishable instances. Use it with 'get_placeholder_options'. Args: - plugin (PlaceholderPlugin): Plugin used for creating of - publish instances. options (Dict[str, Any]): Already available options which are used as defaults for attributes. @@ -1918,3 +1972,23 @@ class CreatePlaceholderItem(PlaceholderItem): def create_failed(self, creator_data): self._failed_created_publish_instances.append(creator_data) + + +def discover_workfile_build_plugins(*args, **kwargs): + return discover(PlaceholderPlugin, *args, **kwargs) + + +def register_workfile_build_plugin(plugin: PlaceholderPlugin): + register_plugin(PlaceholderPlugin, plugin) + + +def deregister_workfile_build_plugin(plugin: PlaceholderPlugin): + deregister_plugin(PlaceholderPlugin, plugin) + + +def register_workfile_build_plugin_path(path: str): + register_plugin_path(PlaceholderPlugin, path) + + +def deregister_workfile_build_plugin_path(path: str): + deregister_plugin_path(PlaceholderPlugin, path) diff --git a/client/ayon_core/plugins/load/delete_old_versions.py b/client/ayon_core/plugins/load/delete_old_versions.py index 8e04fd9827..62302e7123 100644 --- a/client/ayon_core/plugins/load/delete_old_versions.py +++ b/client/ayon_core/plugins/load/delete_old_versions.py @@ -1,501 +1,426 @@ -# TODO This plugin is not converted for AYON -# -# import collections -# import os -# import uuid -# -# import clique -# import ayon_api -# from pymongo import UpdateOne -# import qargparse -# from qtpy import QtWidgets, QtCore -# -# from ayon_core import style -# from ayon_core.addon import AddonsManager -# from ayon_core.lib import format_file_size -# from ayon_core.pipeline import load, Anatomy -# from ayon_core.pipeline.load import ( -# get_representation_path_with_anatomy, -# InvalidRepresentationContext, -# ) -# -# -# class DeleteOldVersions(load.ProductLoaderPlugin): -# """Deletes specific number of old version""" -# -# is_multiple_contexts_compatible = True -# sequence_splitter = "__sequence_splitter__" -# -# representations = {"*"} -# product_types = {"*"} -# tool_names = ["library_loader"] -# -# label = "Delete Old Versions" -# order = 35 -# icon = "trash" -# color = "#d8d8d8" -# -# options = [ -# qargparse.Integer( -# "versions_to_keep", default=2, min=0, help="Versions to keep:" -# ), -# qargparse.Boolean( -# "remove_publish_folder", help="Remove publish folder:" -# ) -# ] -# -# def delete_whole_dir_paths(self, dir_paths, delete=True): -# size = 0 -# -# for dir_path in dir_paths: -# # Delete all files and fodlers in dir path -# for root, dirs, files in os.walk(dir_path, topdown=False): -# for name in files: -# file_path = os.path.join(root, name) -# size += os.path.getsize(file_path) -# if delete: -# os.remove(file_path) -# self.log.debug("Removed file: {}".format(file_path)) -# -# for name in dirs: -# if delete: -# os.rmdir(os.path.join(root, name)) -# -# if not delete: -# continue -# -# # Delete even the folder and it's parents folders if they are empty -# while True: -# if not os.path.exists(dir_path): -# dir_path = os.path.dirname(dir_path) -# continue -# -# if len(os.listdir(dir_path)) != 0: -# break -# -# os.rmdir(os.path.join(dir_path)) -# -# return size -# -# def path_from_representation(self, representation, anatomy): -# try: -# 
context = representation["context"] -# except KeyError: -# return (None, None) -# -# try: -# path = get_representation_path_with_anatomy( -# representation, anatomy -# ) -# except InvalidRepresentationContext: -# return (None, None) -# -# sequence_path = None -# if "frame" in context: -# context["frame"] = self.sequence_splitter -# sequence_path = get_representation_path_with_anatomy( -# representation, anatomy -# ) -# -# if sequence_path: -# sequence_path = sequence_path.normalized() -# -# return (path.normalized(), sequence_path) -# -# def delete_only_repre_files(self, dir_paths, file_paths, delete=True): -# size = 0 -# -# for dir_id, dir_path in dir_paths.items(): -# dir_files = os.listdir(dir_path) -# collections, remainders = clique.assemble(dir_files) -# for file_path, seq_path in file_paths[dir_id]: -# file_path_base = os.path.split(file_path)[1] -# # Just remove file if `frame` key was not in context or -# # filled path is in remainders (single file sequence) -# if not seq_path or file_path_base in remainders: -# if not os.path.exists(file_path): -# self.log.debug( -# "File was not found: {}".format(file_path) -# ) -# continue -# -# size += os.path.getsize(file_path) -# -# if delete: -# os.remove(file_path) -# self.log.debug("Removed file: {}".format(file_path)) -# -# if file_path_base in remainders: -# remainders.remove(file_path_base) -# continue -# -# seq_path_base = os.path.split(seq_path)[1] -# head, tail = seq_path_base.split(self.sequence_splitter) -# -# final_col = None -# for collection in collections: -# if head != collection.head or tail != collection.tail: -# continue -# final_col = collection -# break -# -# if final_col is not None: -# # Fill full path to head -# final_col.head = os.path.join(dir_path, final_col.head) -# for _file_path in final_col: -# if os.path.exists(_file_path): -# -# size += os.path.getsize(_file_path) -# -# if delete: -# os.remove(_file_path) -# self.log.debug( -# "Removed file: {}".format(_file_path) -# ) -# -# _seq_path = final_col.format("{head}{padding}{tail}") -# self.log.debug("Removed files: {}".format(_seq_path)) -# collections.remove(final_col) -# -# elif os.path.exists(file_path): -# size += os.path.getsize(file_path) -# -# if delete: -# os.remove(file_path) -# self.log.debug("Removed file: {}".format(file_path)) -# else: -# self.log.debug( -# "File was not found: {}".format(file_path) -# ) -# -# # Delete as much as possible parent folders -# if not delete: -# return size -# -# for dir_path in dir_paths.values(): -# while True: -# if not os.path.exists(dir_path): -# dir_path = os.path.dirname(dir_path) -# continue -# -# if len(os.listdir(dir_path)) != 0: -# break -# -# self.log.debug("Removed folder: {}".format(dir_path)) -# os.rmdir(dir_path) -# -# return size -# -# def message(self, text): -# msgBox = QtWidgets.QMessageBox() -# msgBox.setText(text) -# msgBox.setStyleSheet(style.load_stylesheet()) -# msgBox.setWindowFlags( -# msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint -# ) -# msgBox.exec_() -# -# def get_data(self, context, versions_count): -# product_entity = context["product"] -# folder_entity = context["folder"] -# project_name = context["project"]["name"] -# anatomy = Anatomy(project_name) -# -# versions = list(ayon_api.get_versions( -# project_name, product_ids=[product_entity["id"]] -# )) -# -# versions_by_parent = collections.defaultdict(list) -# for ent in versions: -# versions_by_parent[ent["productId"]].append(ent) -# -# def sort_func(ent): -# return int(ent["version"]) -# -# all_last_versions = [] -# for 
_parent_id, _versions in versions_by_parent.items(): -# for idx, version in enumerate( -# sorted(_versions, key=sort_func, reverse=True) -# ): -# if idx >= versions_count: -# break -# all_last_versions.append(version) -# -# self.log.debug("Collected versions ({})".format(len(versions))) -# -# # Filter latest versions -# for version in all_last_versions: -# versions.remove(version) -# -# # Update versions_by_parent without filtered versions -# versions_by_parent = collections.defaultdict(list) -# for ent in versions: -# versions_by_parent[ent["productId"]].append(ent) -# -# # Filter already deleted versions -# versions_to_pop = [] -# for version in versions: -# version_tags = version["data"].get("tags") -# if version_tags and "deleted" in version_tags: -# versions_to_pop.append(version) -# -# for version in versions_to_pop: -# msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format( -# folder_entity["path"], -# product_entity["name"], -# version["version"] -# ) -# self.log.debug(( -# "Skipping version. Already tagged as `deleted`. < {} >" -# ).format(msg)) -# versions.remove(version) -# -# version_ids = [ent["id"] for ent in versions] -# -# self.log.debug( -# "Filtered versions to delete ({})".format(len(version_ids)) -# ) -# -# if not version_ids: -# msg = "Skipping processing. Nothing to delete on {}/{}".format( -# folder_entity["path"], product_entity["name"] -# ) -# self.log.info(msg) -# print(msg) -# return -# -# repres = list(ayon_api.get_representations( -# project_name, version_ids=version_ids -# )) -# -# self.log.debug( -# "Collected representations to remove ({})".format(len(repres)) -# ) -# -# dir_paths = {} -# file_paths_by_dir = collections.defaultdict(list) -# for repre in repres: -# file_path, seq_path = self.path_from_representation( -# repre, anatomy -# ) -# if file_path is None: -# self.log.debug(( -# "Could not format path for represenation \"{}\"" -# ).format(str(repre))) -# continue -# -# dir_path = os.path.dirname(file_path) -# dir_id = None -# for _dir_id, _dir_path in dir_paths.items(): -# if _dir_path == dir_path: -# dir_id = _dir_id -# break -# -# if dir_id is None: -# dir_id = uuid.uuid4() -# dir_paths[dir_id] = dir_path -# -# file_paths_by_dir[dir_id].append([file_path, seq_path]) -# -# dir_ids_to_pop = [] -# for dir_id, dir_path in dir_paths.items(): -# if os.path.exists(dir_path): -# continue -# -# dir_ids_to_pop.append(dir_id) -# -# # Pop dirs from both dictionaries -# for dir_id in dir_ids_to_pop: -# dir_paths.pop(dir_id) -# paths = file_paths_by_dir.pop(dir_id) -# # TODO report of missing directories? -# paths_msg = ", ".join([ -# "'{}'".format(path[0].replace("\\", "/")) for path in paths -# ]) -# self.log.debug(( -# "Folder does not exist. Deleting it's files skipped: {}" -# ).format(paths_msg)) -# -# return { -# "dir_paths": dir_paths, -# "file_paths_by_dir": file_paths_by_dir, -# "versions": versions, -# "folder": folder_entity, -# "product": product_entity, -# "archive_product": versions_count == 0 -# } -# -# def main(self, project_name, data, remove_publish_folder): -# # Size of files. 
-# size = 0 -# if not data: -# return size -# -# if remove_publish_folder: -# size = self.delete_whole_dir_paths(data["dir_paths"].values()) -# else: -# size = self.delete_only_repre_files( -# data["dir_paths"], data["file_paths_by_dir"] -# ) -# -# mongo_changes_bulk = [] -# for version in data["versions"]: -# orig_version_tags = version["data"].get("tags") or [] -# version_tags = [tag for tag in orig_version_tags] -# if "deleted" not in version_tags: -# version_tags.append("deleted") -# -# if version_tags == orig_version_tags: -# continue -# -# update_query = {"id": version["id"]} -# update_data = {"$set": {"data.tags": version_tags}} -# mongo_changes_bulk.append(UpdateOne(update_query, update_data)) -# -# if data["archive_product"]: -# mongo_changes_bulk.append(UpdateOne( -# { -# "id": data["product"]["id"], -# "type": "subset" -# }, -# {"$set": {"type": "archived_subset"}} -# )) -# -# if mongo_changes_bulk: -# dbcon = AvalonMongoDB() -# dbcon.Session["AYON_PROJECT_NAME"] = project_name -# dbcon.install() -# dbcon.bulk_write(mongo_changes_bulk) -# dbcon.uninstall() -# -# self._ftrack_delete_versions(data) -# -# return size -# -# def _ftrack_delete_versions(self, data): -# """Delete version on ftrack. -# -# Handling of ftrack logic in this plugin is not ideal. But in OP3 it is -# almost impossible to solve the issue other way. -# -# Note: -# Asset versions on ftrack are not deleted but marked as -# "not published" which cause that they're invisible. -# -# Args: -# data (dict): Data sent to product loader with full context. -# """ -# -# # First check for ftrack id on folder entity -# # - skip if ther is none -# ftrack_id = data["folder"]["attrib"].get("ftrackId") -# if not ftrack_id: -# self.log.info(( -# "Folder does not have filled ftrack id. Skipped delete" -# " of ftrack version." -# )) -# return -# -# # Check if ftrack module is enabled -# addons_manager = AddonsManager() -# ftrack_addon = addons_manager.get("ftrack") -# if not ftrack_addon or not ftrack_addon.enabled: -# return -# -# import ftrack_api -# -# session = ftrack_api.Session() -# product_name = data["product"]["name"] -# versions = { -# '"{}"'.format(version_doc["name"]) -# for version_doc in data["versions"] -# } -# asset_versions = session.query( -# ( -# "select id, is_published from AssetVersion where" -# " asset.parent.id is \"{}\"" -# " and asset.name is \"{}\"" -# " and version in ({})" -# ).format( -# ftrack_id, -# product_name, -# ",".join(versions) -# ) -# ).all() -# -# # Set attribute `is_published` to `False` on ftrack AssetVersions -# for asset_version in asset_versions: -# asset_version["is_published"] = False -# -# try: -# session.commit() -# -# except Exception: -# msg = ( -# "Could not set `is_published` attribute to `False`" -# " for selected AssetVersions." 
-#             )
-#             self.log.error(msg)
-#             self.message(msg)
-#
-#     def load(self, contexts, name=None, namespace=None, options=None):
-#         try:
-#             size = 0
-#             for count, context in enumerate(contexts):
-#                 versions_to_keep = 2
-#                 remove_publish_folder = False
-#                 if options:
-#                     versions_to_keep = options.get(
-#                         "versions_to_keep", versions_to_keep
-#                     )
-#                     remove_publish_folder = options.get(
-#                         "remove_publish_folder", remove_publish_folder
-#                     )
-#
-#                 data = self.get_data(context, versions_to_keep)
-#                 if not data:
-#                     continue
-#
-#                 project_name = context["project"]["name"]
-#                 size += self.main(project_name, data, remove_publish_folder)
-#                 print("Progressing {}/{}".format(count + 1, len(contexts)))
-#
-#             msg = "Total size of files: {}".format(format_file_size(size))
-#             self.log.info(msg)
-#             self.message(msg)
-#
-#         except Exception:
-#             self.log.error("Failed to delete versions.", exc_info=True)
-#
-#
-# class CalculateOldVersions(DeleteOldVersions):
-#     """Calculate file size of old versions"""
-#     label = "Calculate Old Versions"
-#     order = 30
-#     tool_names = ["library_loader"]
-#
-#     options = [
-#         qargparse.Integer(
-#             "versions_to_keep", default=2, min=0, help="Versions to keep:"
-#         ),
-#         qargparse.Boolean(
-#             "remove_publish_folder", help="Remove publish folder:"
-#         )
-#     ]
-#
-#     def main(self, project_name, data, remove_publish_folder):
-#         size = 0
-#
-#         if not data:
-#             return size
-#
-#         if remove_publish_folder:
-#             size = self.delete_whole_dir_paths(
-#                 data["dir_paths"].values(), delete=False
-#             )
-#         else:
-#             size = self.delete_only_repre_files(
-#                 data["dir_paths"], data["file_paths_by_dir"], delete=False
-#             )
-#
-#         return size
+import collections
+import os
+import uuid
+
+import clique
+import ayon_api
+from ayon_api.operations import OperationsSession
+import qargparse
+from qtpy import QtWidgets, QtCore
+
+from ayon_core import style
+from ayon_core.lib import format_file_size
+from ayon_core.pipeline import load, Anatomy
+from ayon_core.pipeline.load import (
+    get_representation_path_with_anatomy,
+    InvalidRepresentationContext,
+)
+
+
+class DeleteOldVersions(load.ProductLoaderPlugin):
+    """Deletes a specific number of old versions."""
+
+    is_multiple_contexts_compatible = True
+    sequence_splitter = "__sequence_splitter__"
+
+    representations = ["*"]
+    product_types = {"*"}
+    tool_names = ["library_loader"]
+
+    label = "Delete Old Versions"
+    order = 35
+    icon = "trash"
+    color = "#d8d8d8"
+
+    options = [
+        qargparse.Integer(
+            "versions_to_keep", default=2, min=0, help="Versions to keep:"
+        ),
+        qargparse.Boolean(
+            "remove_publish_folder", help="Remove publish folder:"
+        )
+    ]
+
+    def delete_whole_dir_paths(self, dir_paths, delete=True):
+        size = 0
+
+        for dir_path in dir_paths:
+            # Delete all files and folders in dir path
+            for root, dirs, files in os.walk(dir_path, topdown=False):
+                for name in files:
+                    file_path = os.path.join(root, name)
+                    size += os.path.getsize(file_path)
+                    if delete:
+                        os.remove(file_path)
+                        self.log.debug("Removed file: {}".format(file_path))
+
+                for name in dirs:
+                    if delete:
+                        os.rmdir(os.path.join(root, name))
+
+            if not delete:
+                continue
+
+            # Delete even the folder and its parent folders if they are empty
+            while True:
+                if not os.path.exists(dir_path):
+                    dir_path = os.path.dirname(dir_path)
+                    continue
+
+                if len(os.listdir(dir_path)) != 0:
+                    break
+
+                os.rmdir(os.path.join(dir_path))
+
+        return size
+
+    def path_from_representation(self, representation, anatomy):
+        try:
+            context = representation["context"]
+        except KeyError:
+            return (None, None)
+
+        try:
+            path
= get_representation_path_with_anatomy( + representation, anatomy + ) + except InvalidRepresentationContext: + return (None, None) + + sequence_path = None + if "frame" in context: + context["frame"] = self.sequence_splitter + sequence_path = get_representation_path_with_anatomy( + representation, anatomy + ) + + if sequence_path: + sequence_path = sequence_path.normalized() + + return (path.normalized(), sequence_path) + + def delete_only_repre_files(self, dir_paths, file_paths, delete=True): + size = 0 + + for dir_id, dir_path in dir_paths.items(): + dir_files = os.listdir(dir_path) + collections, remainders = clique.assemble(dir_files) + for file_path, seq_path in file_paths[dir_id]: + file_path_base = os.path.split(file_path)[1] + # Just remove file if `frame` key was not in context or + # filled path is in remainders (single file sequence) + if not seq_path or file_path_base in remainders: + if not os.path.exists(file_path): + self.log.debug( + "File was not found: {}".format(file_path) + ) + continue + + size += os.path.getsize(file_path) + + if delete: + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) + + if file_path_base in remainders: + remainders.remove(file_path_base) + continue + + seq_path_base = os.path.split(seq_path)[1] + head, tail = seq_path_base.split(self.sequence_splitter) + + final_col = None + for collection in collections: + if head != collection.head or tail != collection.tail: + continue + final_col = collection + break + + if final_col is not None: + # Fill full path to head + final_col.head = os.path.join(dir_path, final_col.head) + for _file_path in final_col: + if os.path.exists(_file_path): + + size += os.path.getsize(_file_path) + + if delete: + os.remove(_file_path) + self.log.debug( + "Removed file: {}".format(_file_path) + ) + + _seq_path = final_col.format("{head}{padding}{tail}") + self.log.debug("Removed files: {}".format(_seq_path)) + collections.remove(final_col) + + elif os.path.exists(file_path): + size += os.path.getsize(file_path) + + if delete: + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) + else: + self.log.debug( + "File was not found: {}".format(file_path) + ) + + # Delete as much as possible parent folders + if not delete: + return size + + for dir_path in dir_paths.values(): + while True: + if not os.path.exists(dir_path): + dir_path = os.path.dirname(dir_path) + continue + + if len(os.listdir(dir_path)) != 0: + break + + self.log.debug("Removed folder: {}".format(dir_path)) + os.rmdir(dir_path) + + return size + + def message(self, text): + msgBox = QtWidgets.QMessageBox() + msgBox.setText(text) + msgBox.setStyleSheet(style.load_stylesheet()) + msgBox.setWindowFlags( + msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint + ) + msgBox.exec_() + + def get_data(self, context, versions_count): + product_entity = context["product"] + folder_entity = context["folder"] + project_name = context["project"]["name"] + anatomy = Anatomy(project_name, project_entity=context["project"]) + + version_fields = ayon_api.get_default_fields_for_type("version") + version_fields.add("tags") + versions = list(ayon_api.get_versions( + project_name, + product_ids=[product_entity["id"]], + active=None, + hero=False, + fields=version_fields + )) + self.log.debug( + "Version Number ({})".format(len(versions)) + ) + versions_by_parent = collections.defaultdict(list) + for ent in versions: + versions_by_parent[ent["productId"]].append(ent) + + def sort_func(ent): + return int(ent["version"]) + + 
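+        # Keep the newest 'versions_count' versions of each product,
+        # the rest become candidates for deletion.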
+        all_last_versions = []
+        for _parent_id, _versions in versions_by_parent.items():
+            for idx, version in enumerate(
+                sorted(_versions, key=sort_func, reverse=True)
+            ):
+                if idx >= versions_count:
+                    break
+                all_last_versions.append(version)
+
+        self.log.debug("Collected versions ({})".format(len(versions)))
+
+        # Filter latest versions
+        for version in all_last_versions:
+            versions.remove(version)
+
+        # Update versions_by_parent without filtered versions
+        versions_by_parent = collections.defaultdict(list)
+        for ent in versions:
+            versions_by_parent[ent["productId"]].append(ent)
+
+        # Filter already deleted versions
+        versions_to_pop = []
+        for version in versions:
+            if "deleted" in version["tags"]:
+                versions_to_pop.append(version)
+
+        for version in versions_to_pop:
+            msg = "Folder: \"{}\" | Product: \"{}\" | Version: \"{}\"".format(
+                folder_entity["path"],
+                product_entity["name"],
+                version["version"]
+            )
+            self.log.debug((
+                "Skipping version. Already tagged as inactive. < {} >"
+            ).format(msg))
+            versions.remove(version)
+
+        version_ids = [ent["id"] for ent in versions]
+
+        self.log.debug(
+            "Filtered versions to delete ({})".format(len(version_ids))
+        )
+
+        if not version_ids:
+            msg = "Skipping processing. Nothing to delete on {}/{}".format(
+                folder_entity["path"], product_entity["name"]
+            )
+            self.log.info(msg)
+            print(msg)
+            return
+
+        repres = list(ayon_api.get_representations(
+            project_name, version_ids=version_ids
+        ))
+
+        self.log.debug(
+            "Collected representations to remove ({})".format(len(repres))
+        )
+
+        dir_paths = {}
+        file_paths_by_dir = collections.defaultdict(list)
+        for repre in repres:
+            file_path, seq_path = self.path_from_representation(
+                repre, anatomy
+            )
+            if file_path is None:
+                self.log.debug((
+                    "Could not format path for representation \"{}\""
+                ).format(str(repre)))
+                continue
+
+            dir_path = os.path.dirname(file_path)
+            dir_id = None
+            for _dir_id, _dir_path in dir_paths.items():
+                if _dir_path == dir_path:
+                    dir_id = _dir_id
+                    break
+
+            if dir_id is None:
+                dir_id = uuid.uuid4()
+                dir_paths[dir_id] = dir_path
+
+            file_paths_by_dir[dir_id].append([file_path, seq_path])
+
+        dir_ids_to_pop = []
+        for dir_id, dir_path in dir_paths.items():
+            if os.path.exists(dir_path):
+                continue
+
+            dir_ids_to_pop.append(dir_id)
+
+        # Pop dirs from both dictionaries
+        for dir_id in dir_ids_to_pop:
+            dir_paths.pop(dir_id)
+            paths = file_paths_by_dir.pop(dir_id)
+            # TODO report of missing directories?
+            paths_msg = ", ".join([
+                "'{}'".format(path[0].replace("\\", "/")) for path in paths
+            ])
+            self.log.debug((
+                "Folder does not exist. Skipped deleting its files: {}"
+            ).format(paths_msg))
+
+        return {
+            "dir_paths": dir_paths,
+            "file_paths_by_dir": file_paths_by_dir,
+            "versions": versions,
+            "folder": folder_entity,
+            "product": product_entity,
+            "archive_product": versions_count == 0
+        }
+
+    def main(self, project_name, data, remove_publish_folder):
+        # Size of files.
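+        # Accumulated in bytes and returned so the caller can
+        # report the total size of removed files.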
+ size = 0 + if not data: + return size + + if remove_publish_folder: + size = self.delete_whole_dir_paths(data["dir_paths"].values()) + else: + size = self.delete_only_repre_files( + data["dir_paths"], data["file_paths_by_dir"] + ) + + op_session = OperationsSession() + for version in data["versions"]: + orig_version_tags = version["tags"] + version_tags = list(orig_version_tags) + changes = {} + if "deleted" not in version_tags: + version_tags.append("deleted") + changes["tags"] = version_tags + + if version["active"]: + changes["active"] = False + + if not changes: + continue + op_session.update_entity( + project_name, "version", version["id"], changes + ) + + op_session.commit() + + return size + + def load(self, contexts, name=None, namespace=None, options=None): + try: + size = 0 + for count, context in enumerate(contexts): + versions_to_keep = 2 + remove_publish_folder = False + if options: + versions_to_keep = options.get( + "versions_to_keep", versions_to_keep + ) + remove_publish_folder = options.get( + "remove_publish_folder", remove_publish_folder + ) + + data = self.get_data(context, versions_to_keep) + if not data: + continue + project_name = context["project"]["name"] + size += self.main(project_name, data, remove_publish_folder) + print("Progressing {}/{}".format(count + 1, len(contexts))) + + msg = "Total size of files: {}".format(format_file_size(size)) + self.log.info(msg) + self.message(msg) + + except Exception: + self.log.error("Failed to delete versions.", exc_info=True) + + +class CalculateOldVersions(DeleteOldVersions): + """Calculate file size of old versions""" + label = "Calculate Old Versions" + order = 30 + tool_names = ["library_loader"] + + options = [ + qargparse.Integer( + "versions_to_keep", default=2, min=0, help="Versions to keep:" + ), + qargparse.Boolean( + "remove_publish_folder", help="Remove publish folder:" + ) + ] + + def main(self, project_name, data, remove_publish_folder): + size = 0 + + if not data: + return size + + if remove_publish_folder: + size = self.delete_whole_dir_paths( + data["dir_paths"].values(), delete=False + ) + else: + size = self.delete_only_repre_files( + data["dir_paths"], data["file_paths_by_dir"], delete=False + ) + + return size diff --git a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py index f62a2f59df..b4d02f2ba5 100644 --- a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py @@ -33,6 +33,7 @@ import collections import pyblish.api import ayon_api +from ayon_core.pipeline.template_data import get_folder_template_data from ayon_core.pipeline.version_start import get_versioning_start @@ -390,24 +391,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): # - 'folder', 'hierarchy', 'parent', 'folder' folder_entity = instance.data.get("folderEntity") if folder_entity: - folder_name = folder_entity["name"] - folder_path = folder_entity["path"] - hierarchy_parts = folder_path.split("/") - hierarchy_parts.pop(0) - hierarchy_parts.pop(-1) - parent_name = project_entity["name"] - if hierarchy_parts: - parent_name = hierarchy_parts[-1] - - hierarchy = "/".join(hierarchy_parts) - anatomy_data.update({ - "asset": folder_name, - "hierarchy": hierarchy, - "parent": parent_name, - "folder": { - "name": folder_name, - }, - }) + folder_data = get_folder_template_data( + folder_entity, + project_entity["name"] + ) + anatomy_data.update(folder_data) 
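+            # 'folder_data' fills the 'folder', 'asset', 'hierarchy'
+            # and 'parent' template keys.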
return if instance.data.get("newAssetPublishing"): @@ -425,6 +413,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "parent": parent_name, "folder": { "name": folder_name, + "path": instance.data["folderPath"], + # TODO get folder type from hierarchy + # Using 'Shot' is current default behavior of editorial + # (or 'newAssetPublishing') publishing. + "type": "Shot", }, }) diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py index ce34f2e88b..865b566e6e 100644 --- a/client/ayon_core/plugins/publish/integrate.py +++ b/client/ayon_core/plugins/publish/integrate.py @@ -42,7 +42,7 @@ def prepare_changes(old_entity, new_entity): Returns: dict[str, Any]: Changes that have new entity. - + """ changes = {} for key in set(new_entity.keys()): @@ -108,67 +108,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin): label = "Integrate Asset" order = pyblish.api.IntegratorOrder - families = ["workfile", - "pointcache", - "pointcloud", - "proxyAbc", - "camera", - "animation", - "model", - "maxScene", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "ass", - "vdbcache", - "scene", - "vrayproxy", - "vrayscene_layer", - "render", - "prerender", - "imagesequence", - "review", - "rendersetup", - "rig", - "plate", - "look", - "ociolook", - "audio", - "yetiRig", - "yeticache", - "nukenodes", - "gizmo", - "source", - "matchmove", - "image", - "assembly", - "fbx", - "gltf", - "textures", - "action", - "harmony.template", - "harmony.palette", - "editorial", - "background", - "camerarig", - "redshiftproxy", - "effect", - "xgen", - "hda", - "usd", - "staticMesh", - "skeletalMesh", - "mvLook", - "mvUsd", - "mvUsdComposition", - "mvUsdOverride", - "online", - "uasset", - "blendScene", - "yeticacheUE", - "tycache" - ] default_template_name = "publish" @@ -358,7 +297,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Compute the resource file infos once (files belonging to the # version instance instead of an individual representation) so - # we can re-use those file infos per representation + # we can reuse those file infos per representation resource_file_infos = self.get_files_info( resource_destinations, anatomy ) diff --git a/client/ayon_core/plugins/publish/validate_containers.py b/client/ayon_core/plugins/publish/validate_containers.py index bd21ec9693..520e7a7ce9 100644 --- a/client/ayon_core/plugins/publish/validate_containers.py +++ b/client/ayon_core/plugins/publish/validate_containers.py @@ -1,6 +1,11 @@ import pyblish.api + +from ayon_core.lib import filter_profiles +from ayon_core.host import ILoadHost from ayon_core.pipeline.load import any_outdated_containers from ayon_core.pipeline import ( + get_current_host_name, + registered_host, PublishXmlValidationError, OptionalPyblishPluginMixin ) @@ -18,17 +23,50 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin): - +class ValidateOutdatedContainers( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Containers are must be updated to latest version on publish.""" label = "Validate Outdated Containers" order = pyblish.api.ValidatorOrder - hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"] + optional = True actions = [ShowInventory] + @classmethod + def apply_settings(cls, settings): + # Disable plugin if host does not inherit from 'ILoadHost' + # - not a host that can load containers + host = registered_host() + if not 
isinstance(host, ILoadHost):
+            cls.enabled = False
+            return
+
+        # Disable if no profile is found for the current host
+        profiles = (
+            settings
+            ["core"]
+            ["publish"]
+            ["ValidateOutdatedContainers"]
+            ["plugin_state_profiles"]
+        )
+        profile = filter_profiles(
+            profiles, {"host_names": get_current_host_name()}
+        )
+        if not profile:
+            cls.enabled = False
+            return
+
+        # Apply settings from profile
+        for attr_name in {
+            "enabled",
+            "optional",
+            "active",
+        }:
+            setattr(cls, attr_name, profile[attr_name])
+
     def process(self, context):
         if not self.is_active(context.data):
             return
diff --git a/client/ayon_core/resources/app_icons/3de4.png b/client/ayon_core/resources/app_icons/3de4.png
new file mode 100644
index 0000000000..bd0fe40d37
Binary files /dev/null and b/client/ayon_core/resources/app_icons/3de4.png differ
diff --git a/client/ayon_core/scripts/ocio_wrapper.py b/client/ayon_core/scripts/ocio_wrapper.py
index 0a78e33c1f..0414fc59ce 100644
--- a/client/ayon_core/scripts/ocio_wrapper.py
+++ b/client/ayon_core/scripts/ocio_wrapper.py
@@ -1,28 +1,31 @@
 """OpenColorIO Wrapper.

-Only to be interpreted by Python 3. It is run in subprocess in case
-Python 2 hosts needs to use it. Or it is used as module for Python 3
-processing.
-
-Providing functionality:
-- get_colorspace - console command - python 2
-    - returning all available color spaces
-      found in input config path.
-- _get_colorspace_data - python 3 - module function
-    - returning all available colorspaces
-      found in input config path.
-- get_views - console command - python 2
-    - returning all available viewers
-      found in input config path.
-- _get_views_data - python 3 - module function
-    - returning all available viewers
-      found in input config path.
+Receive OpenColorIO information and store it in JSON format for processes
+that don't have access to OpenColorIO, or whose version of OpenColorIO is
+not compatible.
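+
+Example of use (the paths are placeholders):
+    > python.exe ./ocio_wrapper.py get_ocio_config_colorspaces \
+        --config_path <config.ocio> --output_path <output.json>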
""" -import click import json from pathlib import Path -import PyOpenColorIO as ocio + +import click + +from ayon_core.pipeline.colorspace import ( + has_compatible_ocio_package, + get_display_view_colorspace_name, + get_config_file_rules_colorspace_from_filepath, + get_config_version_data, + get_ocio_config_views, + get_ocio_config_colorspaces, +) + + +def _save_output_to_json_file(output, output_path): + json_path = Path(output_path) + with open(json_path, "w") as stream: + json.dump(output, stream) + + print(f"Data are saved to '{json_path}'") @click.group() @@ -30,404 +33,185 @@ def main(): pass # noqa: WPS100 -@main.group() -def config(): - """Config related commands group - - Example of use: - > pyton.exe ./ocio_wrapper.py config *args - """ - pass # noqa: WPS100 - - -@main.group() -def colorspace(): - """Colorspace related commands group - - Example of use: - > pyton.exe ./ocio_wrapper.py config *args - """ - pass # noqa: WPS100 - - -@config.command( - name="get_colorspace", - help=( - "return all colorspaces from config file " - "--path input arg is required" - ) -) -@click.option("--in_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -def get_colorspace(in_path, out_path): +@main.command( + name="get_ocio_config_colorspaces", + help="return all colorspaces from config file") +@click.option( + "--config_path", + required=True, + help="OCIO config path to read ocio config file.", + type=click.Path(exists=True)) +@click.option( + "--output_path", + required=True, + help="path where to write output json file", + type=click.Path()) +def _get_ocio_config_colorspaces(config_path, output_path): """Aggregate all colorspace to file. - Python 2 wrapped console command - Args: - in_path (str): config file path string - out_path (str): temp json file path string + config_path (str): config file path string + output_path (str): temp json file path string Example of use: > pyton.exe ./ocio_wrapper.py config get_colorspace - --in_path= --out_path= + --config_path --output_path """ - json_path = Path(out_path) - - out_data = _get_colorspace_data(in_path) - - with open(json_path, "w") as f_: - json.dump(out_data, f_) - - print(f"Colorspace data are saved to '{json_path}'") - - -def _get_colorspace_data(config_path): - """Return all found colorspace data. - - Args: - config_path (str): path string leading to config.ocio - - Raises: - IOError: Input config does not exist. 
- - Returns: - dict: aggregated available colorspaces - """ - config_path = Path(config_path) - - if not config_path.is_file(): - raise IOError( - f"Input path `{config_path}` should be `config.ocio` file") - - config = ocio.Config().CreateFromFile(str(config_path)) - - colorspace_data = { - "roles": {}, - "colorspaces": { - color.getName(): { - "family": color.getFamily(), - "categories": list(color.getCategories()), - "aliases": list(color.getAliases()), - "equalitygroup": color.getEqualityGroup(), - } - for color in config.getColorSpaces() - }, - "displays_views": { - f"{view} ({display})": { - "display": display, - "view": view - - } - for display in config.getDisplays() - for view in config.getViews(display) - }, - "looks": {} - } - - # add looks - looks = config.getLooks() - if looks: - colorspace_data["looks"] = { - look.getName(): {"process_space": look.getProcessSpace()} - for look in looks - } - - # add roles - roles = config.getRoles() - if roles: - colorspace_data["roles"] = { - role: {"colorspace": colorspace} - for (role, colorspace) in roles - } - - return colorspace_data - - -@config.command( - name="get_views", - help=( - "return all viewers from config file " - "--path input arg is required" + _save_output_to_json_file( + get_ocio_config_colorspaces(config_path), + output_path ) -) -@click.option("--in_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -def get_views(in_path, out_path): + + +@main.command( + name="get_ocio_config_views", + help="All viewers from config file") +@click.option( + "--config_path", + required=True, + help="OCIO config path to read ocio config file.", + type=click.Path(exists=True)) +@click.option( + "--output_path", + required=True, + help="path where to write output json file", + type=click.Path()) +def _get_ocio_config_views(config_path, output_path): """Aggregate all viewers to file. - Python 2 wrapped console command - Args: - in_path (str): config file path string - out_path (str): temp json file path string + config_path (str): config file path string + output_path (str): temp json file path string Example of use: > pyton.exe ./ocio_wrapper.py config get_views \ - --in_path= --out_path= + --config_path --output """ - json_path = Path(out_path) - - out_data = _get_views_data(in_path) - - with open(json_path, "w") as f_: - json.dump(out_data, f_) - - print(f"Viewer data are saved to '{json_path}'") - - -def _get_views_data(config_path): - """Return all found viewer data. - - Args: - config_path (str): path string leading to config.ocio - - Raises: - IOError: Input config does not exist. - - Returns: - dict: aggregated available viewers - """ - config_path = Path(config_path) - - if not config_path.is_file(): - raise IOError("Input path should be `config.ocio` file") - - config = ocio.Config().CreateFromFile(str(config_path)) - - data_ = {} - for display in config.getDisplays(): - for view in config.getViews(display): - colorspace = config.getDisplayViewColorSpaceName(display, view) - # Special token. 
See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa - if colorspace == "": - colorspace = display - - data_[f"{display}/{view}"] = { - "display": display, - "view": view, - "colorspace": colorspace - } - - return data_ - - -@config.command( - name="get_version", - help=( - "return major and minor version from config file " - "--config_path input arg is required" - "--out_path input arg is required" + _save_output_to_json_file( + get_ocio_config_views(config_path), + output_path ) -) -@click.option("--config_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -def get_version(config_path, out_path): - """Get version of config. - Python 2 wrapped console command + +@main.command( + name="get_config_version_data", + help="Get major and minor version from config file") +@click.option( + "--config_path", + required=True, + help="OCIO config path to read ocio config file.", + type=click.Path(exists=True)) +@click.option( + "--output_path", + required=True, + help="path where to write output json file", + type=click.Path()) +def _get_config_version_data(config_path, output_path): + """Get version of config. Args: config_path (str): ocio config file path string - out_path (str): temp json file path string + output_path (str): temp json file path string Example of use: > pyton.exe ./ocio_wrapper.py config get_version \ - --config_path= --out_path= + --config_path --output_path """ - json_path = Path(out_path) - - out_data = _get_version_data(config_path) - - with open(json_path, "w") as f_: - json.dump(out_data, f_) - - print(f"Config version data are saved to '{json_path}'") - - -def _get_version_data(config_path): - """Return major and minor version info. - - Args: - config_path (str): path string leading to config.ocio - - Raises: - IOError: Input config does not exist. 
- - Returns: - dict: minor and major keys with values - """ - config_path = Path(config_path) - - if not config_path.is_file(): - raise IOError("Input path should be `config.ocio` file") - - config = ocio.Config().CreateFromFile(str(config_path)) - - return { - "major": config.getMajorVersion(), - "minor": config.getMinorVersion() - } - - -@colorspace.command( - name="get_config_file_rules_colorspace_from_filepath", - help=( - "return colorspace from filepath " - "--config_path - ocio config file path (input arg is required) " - "--filepath - any file path (input arg is required) " - "--out_path - temp json file path (input arg is required)" + _save_output_to_json_file( + get_config_version_data(config_path), + output_path ) -) -@click.option("--config_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--filepath", required=True, - help="path to file to get colorspace from", - type=click.Path()) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -def get_config_file_rules_colorspace_from_filepath( - config_path, filepath, out_path + + +@main.command( + name="get_config_file_rules_colorspace_from_filepath", + help="Colorspace file rules from filepath") +@click.option( + "--config_path", + required=True, + help="OCIO config path to read ocio config file.", + type=click.Path(exists=True)) +@click.option( + "--filepath", + required=True, + help="Path to file to get colorspace from.", + type=click.Path()) +@click.option( + "--output_path", + required=True, + help="Path where to write output json file.", + type=click.Path()) +def _get_config_file_rules_colorspace_from_filepath( + config_path, filepath, output_path ): """Get colorspace from file path wrapper. - Python 2 wrapped console command - Args: config_path (str): config file path string filepath (str): path string leading to file - out_path (str): temp json file path string + output_path (str): temp json file path string Example of use: - > pyton.exe ./ocio_wrapper.py \ + > python.exe ./ocio_wrapper.py \ colorspace get_config_file_rules_colorspace_from_filepath \ - --config_path= --filepath= --out_path= + --config_path --filepath --output_path """ - json_path = Path(out_path) - - colorspace = _get_config_file_rules_colorspace_from_filepath( - config_path, filepath) - - with open(json_path, "w") as f_: - json.dump(colorspace, f_) - - print(f"Colorspace name is saved to '{json_path}'") + _save_output_to_json_file( + get_config_file_rules_colorspace_from_filepath(config_path, filepath), + output_path + ) -def _get_config_file_rules_colorspace_from_filepath(config_path, filepath): - """Return found colorspace data found in v2 file rules. - - Args: - config_path (str): path string leading to config.ocio - filepath (str): path string leading to v2 file rules - - Raises: - IOError: Input config does not exist. - - Returns: - dict: aggregated available colorspaces - """ - config_path = Path(config_path) - - if not config_path.is_file(): - raise IOError( - f"Input path `{config_path}` should be `config.ocio` file") - - config = ocio.Config().CreateFromFile(str(config_path)) - - # TODO: use `parseColorSpaceFromString` instead if ocio v1 - colorspace = config.getColorSpaceFromFilepath(str(filepath)) - - return colorspace - - -def _get_display_view_colorspace_name(config_path, display, view): - """Returns the colorspace attribute of the (display, view) pair. 
- - Args: - config_path (str): path string leading to config.ocio - display (str): display name e.g. "ACES" - view (str): view name e.g. "sRGB" - - - Raises: - IOError: Input config does not exist. - - Returns: - view color space name (str) e.g. "Output - sRGB" - """ - - config_path = Path(config_path) - - if not config_path.is_file(): - raise IOError("Input path should be `config.ocio` file") - - config = ocio.Config.CreateFromFile(str(config_path)) - colorspace = config.getDisplayViewColorSpaceName(display, view) - - return colorspace - - -@config.command( +@main.command( name="get_display_view_colorspace_name", help=( - "return default view colorspace name " - "for the given display and view " - "--path input arg is required" - ) -) -@click.option("--in_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -@click.option("--display", required=True, - help="display name", - type=click.STRING) -@click.option("--view", required=True, - help="view name", - type=click.STRING) -def get_display_view_colorspace_name(in_path, out_path, - display, view): + "Default view colorspace name for the given display and view" + )) +@click.option( + "--config_path", + required=True, + help="path where to read ocio config file", + type=click.Path(exists=True)) +@click.option( + "--display", + required=True, + help="Display name", + type=click.STRING) +@click.option( + "--view", + required=True, + help="view name", + type=click.STRING) +@click.option( + "--output_path", + required=True, + help="path where to write output json file", + type=click.Path()) +def _get_display_view_colorspace_name( + config_path, display, view, output_path +): """Aggregate view colorspace name to file. Wrapper command for processes without access to OpenColorIO Args: - in_path (str): config file path string - out_path (str): temp json file path string + config_path (str): config file path string + output_path (str): temp json file path string display (str): display name e.g. "ACES" view (str): view name e.g. "sRGB" Example of use: > pyton.exe ./ocio_wrapper.py config \ - get_display_view_colorspace_name --in_path= \ - --out_path= --display= --view= + get_display_view_colorspace_name --config_path \ + --output_path --display --view """ + _save_output_to_json_file( + get_display_view_colorspace_name(config_path, display, view), + output_path + ) - out_data = _get_display_view_colorspace_name(in_path, - display, - view) - with open(out_path, "w") as f: - json.dump(out_data, f) - - print(f"Display view colorspace saved to '{out_path}'") - -if __name__ == '__main__': +if __name__ == "__main__": + if not has_compatible_ocio_package(): + raise RuntimeError("OpenColorIO is not available.") main() diff --git a/client/ayon_core/tools/adobe_webserver/app.py b/client/ayon_core/tools/adobe_webserver/app.py index 7d97d7d66d..26bf638c91 100644 --- a/client/ayon_core/tools/adobe_webserver/app.py +++ b/client/ayon_core/tools/adobe_webserver/app.py @@ -104,14 +104,11 @@ class WebServerTool: again. In that case, use existing running webserver. Check here is easier than capturing exception from thread. 
""" - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - result = True - try: - sock.bind((host_name, port)) - result = False - except: - print("Port is in use") + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con: + result = con.connect_ex((host_name, port)) == 0 + if result: + print(f"Port {port} is already in use") return result def call(self, func): diff --git a/client/ayon_core/tools/common_models/cache.py b/client/ayon_core/tools/common_models/cache.py index 221a14160c..59b727728f 100644 --- a/client/ayon_core/tools/common_models/cache.py +++ b/client/ayon_core/tools/common_models/cache.py @@ -1,239 +1,31 @@ -import time -import collections +import warnings -InitInfo = collections.namedtuple( - "InitInfo", - ["default_factory", "lifetime"] +from ayon_core.lib import CacheItem as _CacheItem +from ayon_core.lib import NestedCacheItem as _NestedCacheItem + + +# Cache classes were moved to `ayon_core.lib.cache` +class CacheItem(_CacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'CacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +class NestedCacheItem(_NestedCacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'NestedCacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +__all__ = ( + "CacheItem", + "NestedCacheItem", ) - - -def _default_factory_func(): - return None - - -class CacheItem: - """Simple cache item with lifetime and default value. - - Args: - default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - """ - - def __init__(self, default_factory=None, lifetime=None): - if lifetime is None: - lifetime = 120 - self._lifetime = lifetime - self._last_update = None - if default_factory is None: - default_factory = _default_factory_func - self._default_factory = default_factory - self._data = default_factory() - - @property - def is_valid(self): - """Is cache valid to use. - - Return: - bool: True if cache is valid, False otherwise. - """ - - if self._last_update is None: - return False - - return (time.time() - self._last_update) < self._lifetime - - def set_lifetime(self, lifetime): - """Change lifetime of cache item. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._lifetime = lifetime - - def set_invalid(self): - """Set cache as invalid.""" - - self._last_update = None - - def reset(self): - """Set cache as invalid and reset data.""" - - self._last_update = None - self._data = self._default_factory() - - def get_data(self): - """Receive cached data. - - Returns: - Any: Any data that are cached. - """ - - return self._data - - def update_data(self, data): - self._data = data - self._last_update = time.time() - - -class NestedCacheItem: - """Helper for cached items stored in nested structure. - - Example: - >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) - >>> cache["a"]["b"].is_valid - False - >>> cache["a"]["b"].get_data() - 0 - >>> cache["a"]["b"] = 1 - >>> cache["a"]["b"].is_valid - True - >>> cache["a"]["b"].get_data() - 1 - >>> cache.reset() - >>> cache["a"]["b"].is_valid - False - - Args: - levels (int): Number of nested levels where read cache is stored. 
- default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - _init_info (Optional[InitInfo]): Private argument. Init info for - nested cache where created from parent item. - """ - - def __init__( - self, levels=1, default_factory=None, lifetime=None, _init_info=None - ): - if levels < 1: - raise ValueError("Nested levels must be greater than 0") - self._data_by_key = {} - if _init_info is None: - _init_info = InitInfo(default_factory, lifetime) - self._init_info = _init_info - self._levels = levels - - def __getitem__(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - cache = self._data_by_key.get(key) - if cache is None: - if self._levels > 1: - cache = NestedCacheItem( - levels=self._levels - 1, - _init_info=self._init_info - ) - else: - cache = CacheItem( - self._init_info.default_factory, - self._init_info.lifetime - ) - self._data_by_key[key] = cache - return cache - - def __setitem__(self, key, value): - """Update cached data. - - Args: - key (str): Key of the cache item. - value (Any): Any data that are cached. - """ - - if self._levels > 1: - raise AttributeError(( - "{} does not support '__setitem__'. Lower nested level by {}" - ).format(self.__class__.__name__, self._levels - 1)) - cache = self[key] - cache.update_data(value) - - def get(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - return self[key] - - def cached_count(self): - """Amount of cached items. - - Returns: - int: Amount of cached items. - """ - - return len(self._data_by_key) - - def clear_key(self, key): - """Clear cached item by key. - - Args: - key (str): Key of the cache item. - """ - - self._data_by_key.pop(key, None) - - def clear_invalid(self): - """Clear all invalid cache items. - - Note: - To clear all cache items use 'reset'. - """ - - changed = {} - children_are_nested = self._levels > 1 - for key, cache in tuple(self._data_by_key.items()): - if children_are_nested: - output = cache.clear_invalid() - if output: - changed[key] = output - if not cache.cached_count(): - self._data_by_key.pop(key) - elif not cache.is_valid: - changed[key] = cache.get_data() - self._data_by_key.pop(key) - return changed - - def reset(self): - """Reset cache. - - Note: - To clear only invalid cache items use 'clear_invalid'. - """ - - self._data_by_key = {} - - def set_lifetime(self, lifetime): - """Change lifetime of all children cache items. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._init_info.lifetime = lifetime - for cache in self._data_by_key.values(): - cache.set_lifetime(lifetime) - - @property - def is_valid(self): - """Raise reasonable error when called on wront level. - - Raises: - AttributeError: If called on nested cache item. - """ - - raise AttributeError(( - "{} does not support 'is_valid'. 
Lower nested level by '{}'" - ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/tools/common_models/hierarchy.py b/client/ayon_core/tools/common_models/hierarchy.py index d8b28f020d..78b8a7f492 100644 --- a/client/ayon_core/tools/common_models/hierarchy.py +++ b/client/ayon_core/tools/common_models/hierarchy.py @@ -6,8 +6,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import NestedCacheItem +from ayon_core.lib import NestedCacheItem HIERARCHY_MODEL_SENDER = "hierarchy.model" diff --git a/client/ayon_core/tools/common_models/projects.py b/client/ayon_core/tools/common_models/projects.py index e30561000e..19a38bee21 100644 --- a/client/ayon_core/tools/common_models/projects.py +++ b/client/ayon_core/tools/common_models/projects.py @@ -5,8 +5,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import CacheItem +from ayon_core.lib import CacheItem PROJECTS_MODEL_SENDER = "projects.model" diff --git a/client/ayon_core/tools/common_models/thumbnails.py b/client/ayon_core/tools/common_models/thumbnails.py index 1c3aadc49f..2fa1e36e5c 100644 --- a/client/ayon_core/tools/common_models/thumbnails.py +++ b/client/ayon_core/tools/common_models/thumbnails.py @@ -1,234 +1,15 @@ -import os -import time import collections import ayon_api -import appdirs -from .cache import NestedCacheItem - -FileInfo = collections.namedtuple( - "FileInfo", - ("path", "size", "modification_time") -) - - -class ThumbnailsCache: - """Cache of thumbnails on local storage. - - Thumbnails are cached to appdirs to predefined directory. Each project has - own subfolder with thumbnails -> that's because each project has own - thumbnail id validation and file names are thumbnail ids with matching - extension. Extensions are predefined (.png and .jpeg). - - Cache has cleanup mechanism which is triggered on initialized by default. - - The cleanup has 2 levels: - 1. soft cleanup which remove all files that are older then 'days_alive' - 2. max size cleanup which remove all files until the thumbnails folder - contains less then 'max_filesize' - - this is time consuming so it's not triggered automatically - - Args: - cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails). - """ - - # Lifetime of thumbnails (in seconds) - # - default 3 days - days_alive = 3 - # Max size of thumbnail directory (in bytes) - # - default 2 Gb - max_filesize = 2 * 1024 * 1024 * 1024 - - def __init__(self, cleanup=True): - self._thumbnails_dir = None - self._days_alive_secs = self.days_alive * 24 * 60 * 60 - if cleanup: - self.cleanup() - - def get_thumbnails_dir(self): - """Root directory where thumbnails are stored. - - Returns: - str: Path to thumbnails root. - """ - - if self._thumbnails_dir is None: - # TODO use generic function - directory = appdirs.user_data_dir("AYON", "Ynput") - self._thumbnails_dir = os.path.join(directory, "thumbnails") - return self._thumbnails_dir - - thumbnails_dir = property(get_thumbnails_dir) - - def get_thumbnails_dir_file_info(self): - """Get information about all files in thumbnails directory. - - Returns: - List[FileInfo]: List of file information about all files. 
- """ - - thumbnails_dir = self.thumbnails_dir - files_info = [] - if not os.path.exists(thumbnails_dir): - return files_info - - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - files_info.append(FileInfo( - path, os.path.getsize(path), os.path.getmtime(path) - )) - return files_info - - def get_thumbnails_dir_size(self, files_info=None): - """Got full size of thumbnail directory. - - Args: - files_info (List[FileInfo]): Prepared file information about - files in thumbnail directory. - - Returns: - int: File size of all files in thumbnail directory. - """ - - if files_info is None: - files_info = self.get_thumbnails_dir_file_info() - - if not files_info: - return 0 - - return sum( - file_info.size - for file_info in files_info - ) - - def cleanup(self, check_max_size=False): - """Cleanup thumbnails directory. - - Args: - check_max_size (bool): Also cleanup files to match max size of - thumbnails directory. - """ - - thumbnails_dir = self.get_thumbnails_dir() - # Skip if thumbnails dir does not exist yet - if not os.path.exists(thumbnails_dir): - return - - self._soft_cleanup(thumbnails_dir) - if check_max_size: - self._max_size_cleanup(thumbnails_dir) - - def _soft_cleanup(self, thumbnails_dir): - current_time = time.time() - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - modification_time = os.path.getmtime(path) - if current_time - modification_time > self._days_alive_secs: - os.remove(path) - - def _max_size_cleanup(self, thumbnails_dir): - files_info = self.get_thumbnails_dir_file_info() - size = self.get_thumbnails_dir_size(files_info) - if size < self.max_filesize: - return - - sorted_file_info = collections.deque( - sorted(files_info, key=lambda item: item.modification_time) - ) - diff = size - self.max_filesize - while diff > 0: - if not sorted_file_info: - break - - file_info = sorted_file_info.popleft() - diff -= file_info.size - os.remove(file_info.path) - - def get_thumbnail_filepath(self, project_name, thumbnail_id): - """Get thumbnail by thumbnail id. - - Args: - project_name (str): Name of project. - thumbnail_id (str): Thumbnail id. - - Returns: - Union[str, None]: Path to thumbnail image or None if thumbnail - is not cached yet. - """ - - if not thumbnail_id: - return None - - for ext in ( - ".png", - ".jpeg", - ): - filepath = os.path.join( - self.thumbnails_dir, project_name, thumbnail_id + ext - ) - if os.path.exists(filepath): - return filepath - return None - - def get_project_dir(self, project_name): - """Path to root directory for specific project. - - Args: - project_name (str): Name of project for which root directory path - should be returned. - - Returns: - str: Path to root of project's thumbnails. - """ - - return os.path.join(self.thumbnails_dir, project_name) - - def make_sure_project_dir_exists(self, project_name): - project_dir = self.get_project_dir(project_name) - if not os.path.exists(project_dir): - os.makedirs(project_dir) - return project_dir - - def store_thumbnail(self, project_name, thumbnail_id, content, mime_type): - """Store thumbnail to cache folder. - - Args: - project_name (str): Project where the thumbnail belong to. - thumbnail_id (str): Id of thumbnail. - content (bytes): Byte content of thumbnail file. - mime_data (str): Type of content. - - Returns: - str: Path to cached thumbnail image file. 
- """ - - if mime_type == "image/png": - ext = ".png" - elif mime_type == "image/jpeg": - ext = ".jpeg" - else: - raise ValueError( - "Unknown mime type for thumbnail \"{}\"".format(mime_type)) - - project_dir = self.make_sure_project_dir_exists(project_name) - thumbnail_path = os.path.join(project_dir, thumbnail_id + ext) - with open(thumbnail_path, "wb") as stream: - stream.write(content) - - current_time = time.time() - os.utime(thumbnail_path, (current_time, current_time)) - - return thumbnail_path +from ayon_core.lib import NestedCacheItem +from ayon_core.pipeline.thumbnails import get_thumbnail_path class ThumbnailsModel: entity_cache_lifetime = 240 # In seconds def __init__(self): - self._thumbnail_cache = ThumbnailsCache() self._paths_cache = collections.defaultdict(dict) self._folders_cache = NestedCacheItem( levels=2, lifetime=self.entity_cache_lifetime) @@ -283,28 +64,7 @@ class ThumbnailsModel: if thumbnail_id in project_cache: return project_cache[thumbnail_id] - filepath = self._thumbnail_cache.get_thumbnail_filepath( - project_name, thumbnail_id - ) - if filepath is not None: - project_cache[thumbnail_id] = filepath - return filepath - - # 'ayon_api' had a bug, public function - # 'get_thumbnail_by_id' did not return output of - # 'ServerAPI' method. - con = ayon_api.get_server_api_connection() - result = con.get_thumbnail_by_id(project_name, thumbnail_id) - if result is None: - pass - - elif result.is_valid: - filepath = self._thumbnail_cache.store_thumbnail( - project_name, - thumbnail_id, - result.content, - result.content_type - ) + filepath = get_thumbnail_path(project_name, thumbnail_id) project_cache[thumbnail_id] = filepath return filepath diff --git a/client/ayon_core/tools/launcher/ui/actions_widget.py b/client/ayon_core/tools/launcher/ui/actions_widget.py index a225827418..2ffce13292 100644 --- a/client/ayon_core/tools/launcher/ui/actions_widget.py +++ b/client/ayon_core/tools/launcher/ui/actions_widget.py @@ -290,6 +290,34 @@ class ActionDelegate(QtWidgets.QStyledItemDelegate): painter.drawPixmap(extender_x, extender_y, pix) +class ActionsProxyModel(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + def lessThan(self, left, right): + # Sort by action order and then by label + left_value = left.data(ACTION_SORT_ROLE) + right_value = right.data(ACTION_SORT_ROLE) + + # Values are same -> use super sorting + if left_value == right_value: + # Default behavior is using DisplayRole + return super().lessThan(left, right) + + # Validate 'None' values + if right_value is None: + return True + if left_value is None: + return False + # Sort values and handle incompatible types + try: + return left_value < right_value + except TypeError: + return True + + class ActionsWidget(QtWidgets.QWidget): def __init__(self, controller, parent): super(ActionsWidget, self).__init__(parent) @@ -316,10 +344,7 @@ class ActionsWidget(QtWidgets.QWidget): model = ActionsQtModel(controller) - proxy_model = QtCore.QSortFilterProxyModel() - proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) - proxy_model.setSortRole(ACTION_SORT_ROLE) - + proxy_model = ActionsProxyModel() proxy_model.setSourceModel(model) view.setModel(proxy_model) @@ -359,7 +384,8 @@ class ActionsWidget(QtWidgets.QWidget): def _on_model_refresh(self): self._proxy_model.sort(0) # Force repaint all items - self._view.update() + viewport = self._view.viewport() + viewport.update() def _on_animation(self): 
time_now = time.time() diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py index ad2993af50..cfe91cadab 100644 --- a/client/ayon_core/tools/loader/models/actions.py +++ b/client/ayon_core/tools/loader/models/actions.py @@ -6,6 +6,7 @@ import uuid import ayon_api +from ayon_core.lib import NestedCacheItem from ayon_core.pipeline.load import ( discover_loader_plugins, ProductLoaderPlugin, @@ -17,7 +18,6 @@ from ayon_core.pipeline.load import ( LoadError, IncompatibleLoaderError, ) -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ActionItem ACTIONS_MODEL_SENDER = "actions.model" diff --git a/client/ayon_core/tools/loader/models/products.py b/client/ayon_core/tools/loader/models/products.py index 812446a012..a3bbc30a09 100644 --- a/client/ayon_core/tools/loader/models/products.py +++ b/client/ayon_core/tools/loader/models/products.py @@ -5,8 +5,8 @@ import arrow import ayon_api from ayon_api.operations import OperationsSession +from ayon_core.lib import NestedCacheItem from ayon_core.style import get_default_entity_icon_color -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ( ProductTypeItem, ProductItem, diff --git a/client/ayon_core/tools/loader/models/sitesync.py b/client/ayon_core/tools/loader/models/sitesync.py index 987510905b..02504c2ad3 100644 --- a/client/ayon_core/tools/loader/models/sitesync.py +++ b/client/ayon_core/tools/loader/models/sitesync.py @@ -2,9 +2,8 @@ import collections from ayon_api import get_representations, get_versions_links -from ayon_core.lib import Logger +from ayon_core.lib import Logger, NestedCacheItem from ayon_core.addon import AddonsManager -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ActionItem DOWNLOAD_IDENTIFIER = "sitesync.download" diff --git a/client/ayon_core/tools/loader/ui/window.py b/client/ayon_core/tools/loader/ui/window.py index 3a6f4679fa..8529a53b06 100644 --- a/client/ayon_core/tools/loader/ui/window.py +++ b/client/ayon_core/tools/loader/ui/window.py @@ -335,9 +335,7 @@ class LoaderWindow(QtWidgets.QWidget): def closeEvent(self, event): super(LoaderWindow, self).closeEvent(event) - # Deselect project so current context will be selected - # on next 'showEvent' - self._controller.set_selected_project(None) + self._reset_on_show = True def keyPressEvent(self, event): diff --git a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py index 47c5399cf7..4e34f9b58c 100644 --- a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py @@ -52,6 +52,7 @@ class SelectionTypes: class BaseGroupWidget(QtWidgets.QWidget): selected = QtCore.Signal(str, str, str) removed_selected = QtCore.Signal() + double_clicked = QtCore.Signal() def __init__(self, group_name, parent): super(BaseGroupWidget, self).__init__(parent) @@ -192,6 +193,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget): else: widget = ConvertorItemCardWidget(item, self) widget.selected.connect(self._on_widget_selection) + widget.double_clicked(self.double_clicked) self._widgets_by_id[item.id] = widget self._content_layout.insertWidget(widget_idx, widget) widget_idx += 1 @@ -254,6 +256,7 @@ class InstanceGroupWidget(BaseGroupWidget): ) widget.selected.connect(self._on_widget_selection) 
widget.active_changed.connect(self._on_active_changed) + widget.double_clicked.connect(self.double_clicked) self._widgets_by_id[instance.id] = widget self._content_layout.insertWidget(widget_idx, widget) widget_idx += 1 @@ -271,6 +274,7 @@ class CardWidget(BaseClickableFrame): # Group identifier of card # - this must be set because if send when mouse is released with card id _group_identifier = None + double_clicked = QtCore.Signal() def __init__(self, parent): super(CardWidget, self).__init__(parent) @@ -279,6 +283,11 @@ class CardWidget(BaseClickableFrame): self._selected = False self._id = None + def mouseDoubleClickEvent(self, event): + super(CardWidget, self).mouseDoubleClickEvent(event) + if self._is_valid_double_click(event): + self.double_clicked.emit() + @property def id(self): """Id of card.""" @@ -312,6 +321,9 @@ class CardWidget(BaseClickableFrame): self.selected.emit(self._id, self._group_identifier, selection_type) + def _is_valid_double_click(self, event): + return True + class ContextCardWidget(CardWidget): """Card for global context. @@ -527,6 +539,15 @@ class InstanceCardWidget(CardWidget): def _on_expend_clicked(self): self._set_expanded() + def _is_valid_double_click(self, event): + widget = self.childAt(event.pos()) + if ( + widget is self._active_checkbox + or widget is self._expand_btn + ): + return False + return True + class InstanceCardView(AbstractInstanceView): """Publish access to card view. @@ -534,6 +555,8 @@ class InstanceCardView(AbstractInstanceView): Wrapper of all widgets in card view. """ + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceCardView, self).__init__(parent) @@ -715,6 +738,7 @@ class InstanceCardView(AbstractInstanceView): ) group_widget.active_changed.connect(self._on_active_changed) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(widget_idx, group_widget) self._widgets_by_group[group_name] = group_widget @@ -755,6 +779,7 @@ class InstanceCardView(AbstractInstanceView): widget = ContextCardWidget(self._content_widget) widget.selected.connect(self._on_widget_selection) + widget.double_clicked.connect(self.double_clicked) self._context_widget = widget @@ -778,6 +803,7 @@ class InstanceCardView(AbstractInstanceView): CONVERTOR_ITEM_GROUP, self._content_widget ) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(1, group_widget) self._convertor_items_group = group_widget diff --git a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py index 3322a73be6..71be0ab1a4 100644 --- a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py @@ -110,6 +110,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): This is required to be able use custom checkbox on custom place. 
""" active_changed = QtCore.Signal(str, bool) + double_clicked = QtCore.Signal() def __init__(self, instance, parent): super(InstanceListItemWidget, self).__init__(parent) @@ -149,6 +150,12 @@ class InstanceListItemWidget(QtWidgets.QWidget): self._set_valid_property(instance.has_valid_context) + def mouseDoubleClickEvent(self, event): + widget = self.childAt(event.pos()) + super(InstanceListItemWidget, self).mouseDoubleClickEvent(event) + if widget is not self._active_checkbox: + self.double_clicked.emit() + def _set_valid_property(self, valid): if self._has_valid_context == valid: return @@ -209,6 +216,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): class ListContextWidget(QtWidgets.QFrame): """Context (or global attributes) widget.""" + double_clicked = QtCore.Signal() + def __init__(self, parent): super(ListContextWidget, self).__init__(parent) @@ -225,6 +234,10 @@ class ListContextWidget(QtWidgets.QFrame): self.label_widget = label_widget + def mouseDoubleClickEvent(self, event): + super(ListContextWidget, self).mouseDoubleClickEvent(event) + self.double_clicked.emit() + class InstanceListGroupWidget(QtWidgets.QFrame): """Widget representing group of instances. @@ -317,6 +330,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame): class InstanceTreeView(QtWidgets.QTreeView): """View showing instances and their groups.""" toggle_requested = QtCore.Signal(int) + double_clicked = QtCore.Signal() def __init__(self, *args, **kwargs): super(InstanceTreeView, self).__init__(*args, **kwargs) @@ -425,6 +439,9 @@ class InstanceListView(AbstractInstanceView): This is public access to and from list view. """ + + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceListView, self).__init__(parent) @@ -454,6 +471,7 @@ class InstanceListView(AbstractInstanceView): instance_view.collapsed.connect(self._on_collapse) instance_view.expanded.connect(self._on_expand) instance_view.toggle_requested.connect(self._on_toggle_request) + instance_view.double_clicked.connect(self.double_clicked) self._group_items = {} self._group_widgets = {} @@ -687,6 +705,7 @@ class InstanceListView(AbstractInstanceView): self._active_toggle_enabled ) widget.active_changed.connect(self._on_active_changed) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._widgets_by_id[instance.id] = widget @@ -717,6 +736,7 @@ class InstanceListView(AbstractInstanceView): ) proxy_index = self._proxy_model.mapFromSource(index) widget = ListContextWidget(self._instance_view) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._context_widget = widget diff --git a/client/ayon_core/tools/publisher/widgets/overview_widget.py b/client/ayon_core/tools/publisher/widgets/overview_widget.py index dd82185830..cedf52ae01 100644 --- a/client/ayon_core/tools/publisher/widgets/overview_widget.py +++ b/client/ayon_core/tools/publisher/widgets/overview_widget.py @@ -18,6 +18,7 @@ class OverviewWidget(QtWidgets.QFrame): instance_context_changed = QtCore.Signal() create_requested = QtCore.Signal() convert_requested = QtCore.Signal() + publish_tab_requested = QtCore.Signal() anim_end_value = 200 anim_duration = 200 @@ -113,9 +114,15 @@ class OverviewWidget(QtWidgets.QFrame): product_list_view.selection_changed.connect( self._on_product_change ) + product_list_view.double_clicked.connect( + self.publish_tab_requested + ) product_view_cards.selection_changed.connect( self._on_product_change ) + 
product_view_cards.double_clicked.connect( + self.publish_tab_requested + ) # Active instances changed product_list_view.active_changed.connect( self._on_active_changed diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py index 123864ff6c..1b13ced317 100644 --- a/client/ayon_core/tools/publisher/window.py +++ b/client/ayon_core/tools/publisher/window.py @@ -258,6 +258,9 @@ class PublisherWindow(QtWidgets.QDialog): overview_widget.convert_requested.connect( self._on_convert_requested ) + overview_widget.publish_tab_requested.connect( + self._go_to_publish_tab + ) save_btn.clicked.connect(self._on_save_clicked) reset_btn.clicked.connect(self._on_reset_clicked) diff --git a/client/ayon_core/tools/push_to_project/models/integrate.py b/client/ayon_core/tools/push_to_project/models/integrate.py index 6e43050c05..5937ffa4da 100644 --- a/client/ayon_core/tools/push_to_project/models/integrate.py +++ b/client/ayon_core/tools/push_to_project/models/integrate.py @@ -723,7 +723,6 @@ class ProjectPushItemProcess: dst_project_name = self._item.dst_project_name dst_folder_id = self._item.dst_folder_id dst_task_name = self._item.dst_task_name - dst_task_name_low = dst_task_name.lower() new_folder_name = self._item.new_folder_name if not dst_folder_id and not new_folder_name: self._status.set_failed( @@ -765,7 +764,7 @@ class ProjectPushItemProcess: dst_project_name, folder_ids=[folder_entity["id"]] ) } - task_info = folder_tasks.get(dst_task_name_low) + task_info = folder_tasks.get(dst_task_name.lower()) if not task_info: self._status.set_failed( f"Could find task with name \"{dst_task_name}\"" diff --git a/client/ayon_core/tools/workfile_template_build/lib.py b/client/ayon_core/tools/workfile_template_build/lib.py index de3a0d0084..ffd6fefc38 100644 --- a/client/ayon_core/tools/workfile_template_build/lib.py +++ b/client/ayon_core/tools/workfile_template_build/lib.py @@ -8,12 +8,12 @@ from ayon_core.tools.utils.dialogs import show_message_dialog def open_template_ui(builder, main_window): """Open template from `builder` - Asks user about overwriting current scene and feedsback exceptions. + Asks user about overwriting current scene and feedback exceptions. """ result = QtWidgets.QMessageBox.question( main_window, "Opening template", - "Caution! You will loose unsaved changes.\nDo you want to continue?", + "Caution! You will lose unsaved changes.\nDo you want to continue?", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No ) if result == QtWidgets.QMessageBox.Yes: diff --git a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py index 6a1572deb2..fe6abee951 100644 --- a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py +++ b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py @@ -20,6 +20,8 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): controller (AbstractWorkfilesFrontend): The control object. 
""" + refreshed = QtCore.Signal() + def __init__(self, controller): super(WorkAreaFilesModel, self).__init__() @@ -163,6 +165,12 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): self._fill_items() def _fill_items(self): + try: + self._fill_items_impl() + finally: + self.refreshed.emit() + + def _fill_items_impl(self): folder_id = self._selected_folder_id task_id = self._selected_task_id if not folder_id or not task_id: @@ -285,6 +293,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): selection_model.selectionChanged.connect(self._on_selection_change) view.double_clicked.connect(self._on_mouse_double_click) view.customContextMenuRequested.connect(self._on_context_menu) + model.refreshed.connect(self._on_model_refresh) controller.register_event_callback( "expected_selection_changed", @@ -298,6 +307,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): self._controller = controller self._published_mode = False + self._change_selection_on_refresh = True def set_published_mode(self, published_mode): """Set the published mode. @@ -379,7 +389,9 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): if not workfile_info["current"]: return + self._change_selection_on_refresh = False self._model.refresh() + self._change_selection_on_refresh = True workfile_name = workfile_info["name"] if ( @@ -394,3 +406,30 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): self._controller.expected_workfile_selected( event["folder"]["id"], event["task"]["name"], workfile_name ) + + def _on_model_refresh(self): + if ( + not self._change_selection_on_refresh + or self._proxy_model.rowCount() < 1 + ): + return + + # Find the row with latest date modified + latest_index = max( + ( + self._proxy_model.index(idx, 0) + for idx in range(self._proxy_model.rowCount()) + ), + key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE) + ) + + # Select row of latest modified + selection_model = self._view.selectionModel() + selection_model.select( + latest_index, + ( + QtCore.QItemSelectionModel.ClearAndSelect + | QtCore.QItemSelectionModel.Current + | QtCore.QItemSelectionModel.Rows + ) + ) diff --git a/client/ayon_core/tools/workfiles/widgets/window.py b/client/ayon_core/tools/workfiles/widgets/window.py index 8a2617d270..1cfae7ec90 100644 --- a/client/ayon_core/tools/workfiles/widgets/window.py +++ b/client/ayon_core/tools/workfiles/widgets/window.py @@ -118,11 +118,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget): overlay_invalid_host = InvalidHostOverlay(self) overlay_invalid_host.setVisible(False) - first_show_timer = QtCore.QTimer() - first_show_timer.setSingleShot(True) - first_show_timer.setInterval(50) + show_timer = QtCore.QTimer() + show_timer.setSingleShot(True) + show_timer.setInterval(50) - first_show_timer.timeout.connect(self._on_first_show) + show_timer.timeout.connect(self._on_show) controller.register_event_callback( "save_as.finished", @@ -159,7 +159,7 @@ class WorkfilesToolWindow(QtWidgets.QWidget): self._tasks_widget = tasks_widget self._side_panel = side_panel - self._first_show_timer = first_show_timer + self._show_timer = show_timer self._post_init() @@ -287,9 +287,9 @@ class WorkfilesToolWindow(QtWidgets.QWidget): def showEvent(self, event): super(WorkfilesToolWindow, self).showEvent(event) + self._show_timer.start() if self._first_show: self._first_show = False - self._first_show_timer.start() self.setStyleSheet(style.load_stylesheet()) def keyPressEvent(self, event): @@ -303,9 +303,8 @@ class WorkfilesToolWindow(QtWidgets.QWidget): pass - def _on_first_show(self): - if not self._controller_refreshed: 
- self.refresh() + def _on_show(self): + self.refresh() def _on_file_text_filter_change(self, text): self._files_widget.set_text_filter(text) diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py index a60de0493a..275e1b1dd6 100644 --- a/client/ayon_core/version.py +++ b/client/ayon_core/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring AYON core addon version.""" -__version__ = "0.3.1-dev.1" +__version__ = "0.3.2-dev.1" diff --git a/client/pyproject.toml b/client/pyproject.toml index 1a0ad7e5f2..5e811321f8 100644 --- a/client/pyproject.toml +++ b/client/pyproject.toml @@ -16,7 +16,7 @@ aiohttp_json_rpc = "*" # TVPaint server aiohttp-middlewares = "^2.0.0" wsrpc_aiohttp = "^3.1.1" # websocket server Click = "^8" -OpenTimelineIO = "0.14.1" +OpenTimelineIO = "0.16.0" opencolorio = "2.2.1" Pillow = "9.5.0" pynput = "^1.7.2" # Timers manager - TODO remove diff --git a/package.py b/package.py index 79450d029f..b7b8d2dae6 100644 --- a/package.py +++ b/package.py @@ -1,11 +1,12 @@ name = "core" title = "Core" -version = "0.3.1-dev.1" +version = "0.3.2-dev.1" client_dir = "ayon_core" plugin_for = ["ayon_server"] -requires = [ - "~ayon_server-1.0.3+<2.0.0", -] +ayon_server_version = ">=1.0.3,<2.0.0" +ayon_launcher_version = ">=1.0.2" +ayon_required_addons = {} +ayon_compatible_addons = {} diff --git a/pyproject.toml b/pyproject.toml index dc8b312364..4726bef41a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,6 +77,20 @@ unfixable = [] # Allow unused variables when underscore-prefixed. dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" +exclude = [ + "client/ayon_core/hosts/unreal/integration/*", + "client/ayon_core/hosts/aftereffects/api/extension/js/libs/*", + "client/ayon_core/hosts/hiero/api/startup/*", + "client/ayon_core/modules/deadline/repository/custom/plugins/CelAction/*", + "client/ayon_core/modules/deadline/repository/custom/plugins/HarmonyAYON/*", + "client/ayon_core/modules/click_wrap.py", + "client/ayon_core/scripts/slates/__init__.py" +] + +[tool.ruff.lint.per-file-ignores] +"client/ayon_core/lib/__init__.py" = ["E402"] +"client/ayon_core/hosts/max/startup/startup.py" = ["E402"] + [tool.ruff.format] # Like Black, use double quotes for strings. quote-style = "double" @@ -92,7 +106,7 @@ line-ending = "auto" [tool.codespell] # Ignore words that are not in the dictionary. -ignore-words-list = "ayon,ynput,parms,parm,hda,developpement" +ignore-words-list = "ayon,ynput,parms,parm,hda,developpement,ue" skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*" count = true diff --git a/server/__init__.py b/server/__init__.py index 152cc77218..79f505ccd5 100644 --- a/server/__init__.py +++ b/server/__init__.py @@ -1,3 +1,5 @@ +from typing import Any + from ayon_server.addons import BaseServerAddon from .settings import CoreSettings, DEFAULT_VALUES @@ -9,3 +11,53 @@ class CoreAddon(BaseServerAddon): async def get_default_settings(self): settings_model_cls = self.get_settings_model() return settings_model_cls(**DEFAULT_VALUES) + + async def convert_settings_overrides( + self, + source_version: str, + overrides: dict[str, Any], + ) -> dict[str, Any]: + self._convert_imagio_configs_0_3_1(overrides) + # Use super conversion + return await super().convert_settings_overrides( + source_version, overrides + ) + + def _convert_imagio_configs_0_3_1(self, overrides): + """Imageio config settings did change to profiles since 0.3.1. 
.""" + imageio_overrides = overrides.get("imageio") or {} + if ( + "ocio_config" not in imageio_overrides + or "filepath" not in imageio_overrides["ocio_config"] + ): + return + + ocio_config = imageio_overrides.pop("ocio_config") + + filepath = ocio_config["filepath"] + if not filepath: + return + first_filepath = filepath[0] + ocio_config_profiles = imageio_overrides.setdefault( + "ocio_config_profiles", [] + ) + base_value = { + "type": "builtin_path", + "product_name": "", + "host_names": [], + "task_names": [], + "task_types": [], + "custom_path": "", + "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio" + } + if first_filepath in ( + "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", + "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio", + ): + base_value["type"] = "builtin_path" + base_value["builtin_path"] = first_filepath + else: + base_value["type"] = "custom_path" + base_value["custom_path"] = first_filepath + + ocio_config_profiles.append(base_value) diff --git a/server/settings/main.py b/server/settings/main.py index 28a69e182d..40e16e7e91 100644 --- a/server/settings/main.py +++ b/server/settings/main.py @@ -54,9 +54,67 @@ class CoreImageIOFileRulesModel(BaseSettingsModel): return value -class CoreImageIOConfigModel(BaseSettingsModel): - filepath: list[str] = SettingsField( - default_factory=list, title="Config path" +def _ocio_config_profile_types(): + return [ + {"value": "builtin_path", "label": "AYON built-in OCIO config"}, + {"value": "custom_path", "label": "Path to OCIO config"}, + {"value": "product_name", "label": "Published product"}, + ] + + +def _ocio_built_in_paths(): + return [ + { + "value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", + "label": "ACES 1.2", + "description": "Aces 1.2 OCIO config file." + }, + { + "value": "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio", + "label": "Nuke default", + }, + ] + + +class CoreImageIOConfigProfilesModel(BaseSettingsModel): + _layout = "expanded" + host_names: list[str] = SettingsField( + default_factory=list, + title="Host names" + ) + task_types: list[str] = SettingsField( + default_factory=list, + title="Task types", + enum_resolver=task_types_enum + ) + task_names: list[str] = SettingsField( + default_factory=list, + title="Task names" + ) + type: str = SettingsField( + title="Profile type", + enum_resolver=_ocio_config_profile_types, + conditionalEnum=True, + default="builtin_path", + section="---", + ) + builtin_path: str = SettingsField( + "ACES 1.2", + title="Built-in OCIO config", + enum_resolver=_ocio_built_in_paths, + ) + custom_path: str = SettingsField( + "", + title="OCIO config path", + description="Path to OCIO config. Anatomy formatting is supported.", + ) + product_name: str = SettingsField( + "", + title="Product name", + description=( + "Published product name to get OCIO config from. " + "Partial match is supported." 
+ ), ) @@ -65,9 +123,8 @@ class CoreImageIOBaseModel(BaseSettingsModel): False, title="Enable Color Management" ) - ocio_config: CoreImageIOConfigModel = SettingsField( - default_factory=CoreImageIOConfigModel, - title="OCIO config" + ocio_config_profiles: list[CoreImageIOConfigProfilesModel] = SettingsField( + default_factory=list, title="OCIO config profiles" ) file_rules: CoreImageIOFileRulesModel = SettingsField( default_factory=CoreImageIOFileRulesModel, @@ -186,12 +243,17 @@ class CoreSettings(BaseSettingsModel): DEFAULT_VALUES = { "imageio": { "activate_global_color_management": False, - "ocio_config": { - "filepath": [ - "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", - "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio" - ] - }, + "ocio_config_profiles": [ + { + "host_names": [], + "task_types": [], + "task_names": [], + "type": "builtin_path", + "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", + "custom_path": "", + "product_name": "", + } + ], "file_rules": { "activate_global_file_rules": False, "rules": [ @@ -199,42 +261,57 @@ DEFAULT_VALUES = { "name": "example", "pattern": ".*(beauty).*", "colorspace": "ACES - ACEScg", - "ext": "exr" + "ext": "exr", } - ] - } + ], + }, }, "studio_name": "", "studio_code": "", - "environments": "{\n\"STUDIO_SW\": {\n \"darwin\": \"/mnt/REPO_SW\",\n \"linux\": \"/mnt/REPO_SW\",\n \"windows\": \"P:/REPO_SW\"\n }\n}", + "environments": json.dumps( + { + "STUDIO_SW": { + "darwin": "/mnt/REPO_SW", + "linux": "/mnt/REPO_SW", + "windows": "P:/REPO_SW" + } + }, + indent=4 + ), "tools": DEFAULT_TOOLS_VALUES, "version_start_category": { "profiles": [] }, "publish": DEFAULT_PUBLISH_VALUES, - "project_folder_structure": json.dumps({ - "__project_root__": { - "prod": {}, - "resources": { - "footage": { - "plates": {}, - "offline": {} + "project_folder_structure": json.dumps( + { + "__project_root__": { + "prod": {}, + "resources": { + "footage": { + "plates": {}, + "offline": {} + }, + "audio": {}, + "art_dept": {} }, - "audio": {}, - "art_dept": {} - }, - "editorial": {}, - "assets": { - "characters": {}, - "locations": {} - }, - "shots": {} - } - }, indent=4), + "editorial": {}, + "assets": { + "characters": {}, + "locations": {} + }, + "shots": {} + } + }, + indent=4 + ), "project_plugins": { "windows": [], "darwin": [], "linux": [] }, - "project_environments": "{}" + "project_environments": json.dumps( + {}, + indent=4 + ) } diff --git a/server/settings/publish_plugins.py b/server/settings/publish_plugins.py index e61bf6986b..61e73ce912 100644 --- a/server/settings/publish_plugins.py +++ b/server/settings/publish_plugins.py @@ -59,6 +59,33 @@ class CollectFramesFixDefModel(BaseSettingsModel): ) +class ValidateOutdatedContainersProfile(BaseSettingsModel): + _layout = "expanded" + # Filtering + host_names: list[str] = SettingsField( + default_factory=list, + title="Host names" + ) + # Profile values + enabled: bool = SettingsField(True, title="Enabled") + optional: bool = SettingsField(True, title="Optional") + active: bool = SettingsField(True, title="Active") + + +class ValidateOutdatedContainersModel(BaseSettingsModel): + """Validate if Publishing intent was selected. + + It is possible to disable validation for specific publishing context + with profiles. 
+ """ + + _isGroup = True + plugin_state_profiles: list[ValidateOutdatedContainersProfile] = SettingsField( + default_factory=list, + title="Plugin enable state profiles", + ) + + class ValidateIntentProfile(BaseSettingsModel): _layout = "expanded" hosts: list[str] = SettingsField(default_factory=list, title="Host names") @@ -770,6 +797,10 @@ class PublishPuginsModel(BaseSettingsModel): default_factory=ValidateBaseModel, title="Validate Version" ) + ValidateOutdatedContainers: ValidateOutdatedContainersModel = SettingsField( + default_factory=ValidateOutdatedContainersModel, + title="Validate Containers" + ) ValidateIntent: ValidateIntentModel = SettingsField( default_factory=ValidateIntentModel, title="Validate Intent" @@ -855,6 +886,25 @@ DEFAULT_PUBLISH_VALUES = { "optional": False, "active": True }, + "ValidateOutdatedContainers": { + "plugin_state_profiles": [ + { + # Default host names are based on original + # filter of ValidateContainer pyblish plugin + "host_names": [ + "maya", + "houdini", + "nuke", + "harmony", + "photoshop", + "aftereffects" + ], + "enabled": True, + "optional": True, + "active": True + } + ] + }, "ValidateIntent": { "enabled": False, "profiles": [] diff --git a/server_addon/aftereffects/package.py b/server_addon/aftereffects/package.py new file mode 100644 index 0000000000..7a2f9bc7af --- /dev/null +++ b/server_addon/aftereffects/package.py @@ -0,0 +1,3 @@ +name = "aftereffects" +title = "AfterEffects" +version = "0.1.4" diff --git a/server_addon/aftereffects/server/__init__.py b/server_addon/aftereffects/server/__init__.py index e14e76e9db..76e6d5b2eb 100644 --- a/server_addon/aftereffects/server/__init__.py +++ b/server_addon/aftereffects/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import AfterEffectsSettings, DEFAULT_AFTEREFFECTS_SETTING -from .version import __version__ class AfterEffects(BaseServerAddon): - name = "aftereffects" - title = "AfterEffects" - version = __version__ - settings_model = AfterEffectsSettings async def get_default_settings(self): diff --git a/server_addon/aftereffects/server/settings/publish_plugins.py b/server_addon/aftereffects/server/settings/publish_plugins.py index 61d67f26d3..a9f30c6686 100644 --- a/server_addon/aftereffects/server/settings/publish_plugins.py +++ b/server_addon/aftereffects/server/settings/publish_plugins.py @@ -22,12 +22,6 @@ class ValidateSceneSettingsModel(BaseSettingsModel): ) -class ValidateContainersModel(BaseSettingsModel): - enabled: bool = SettingsField(True, title="Enabled") - optional: bool = SettingsField(True, title="Optional") - active: bool = SettingsField(True, title="Active") - - class AfterEffectsPublishPlugins(BaseSettingsModel): CollectReview: CollectReviewPluginModel = SettingsField( default_factory=CollectReviewPluginModel, @@ -37,10 +31,6 @@ class AfterEffectsPublishPlugins(BaseSettingsModel): default_factory=ValidateSceneSettingsModel, title="Validate Scene Settings", ) - ValidateContainers: ValidateContainersModel = SettingsField( - default_factory=ValidateContainersModel, - title="Validate Containers", - ) AE_PUBLISH_PLUGINS_DEFAULTS = { @@ -58,9 +48,4 @@ AE_PUBLISH_PLUGINS_DEFAULTS = { ".*" ] }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True, - } } diff --git a/server_addon/aftereffects/server/version.py b/server_addon/aftereffects/server/version.py deleted file mode 100644 index e57ad00718..0000000000 --- a/server_addon/aftereffects/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: 
utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.3" diff --git a/server_addon/applications/client/ayon_applications/addon.py b/server_addon/applications/client/ayon_applications/addon.py index 0f1b68af0e..a8eaa46cad 100644 --- a/server_addon/applications/client/ayon_applications/addon.py +++ b/server_addon/applications/client/ayon_applications/addon.py @@ -110,6 +110,26 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): ] } + def launch_application( + self, app_name, project_name, folder_path, task_name + ): + """Launch application. + + Args: + app_name (str): Full application name e.g. 'maya/2024'. + project_name (str): Project name. + folder_path (str): Folder path. + task_name (str): Task name. + + """ + app_manager = self.get_applications_manager() + return app_manager.launch( + app_name, + project_name=project_name, + folder_path=folder_path, + task_name=task_name, + ) + # --- CLI --- def cli(self, addon_click_group): main_group = click_wrap.group( @@ -134,6 +154,17 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): default=None ) ) + ( + main_group.command( + self._cli_launch_applications, + name="launch", + help="Launch application" + ) + .option("--app", required=True, help="Application name") + .option("--project", required=True, help="Project name") + .option("--folder", required=True, help="Folder path") + .option("--task", required=True, help="Task name") + ) # Convert main command to click object and add it to parent group addon_click_group.add_command( main_group.to_click_obj() @@ -171,3 +202,15 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): with open(output_json_path, "w") as file_stream: json.dump(env, file_stream, indent=4) + + def _cli_launch_applications(self, project, folder, task, app): + """Launch application. + + Args: + project (str): Project name. + folder (str): Folder path. + task (str): Task name. + app (str): Full application name e.g. 'maya/2024'. 
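For illustration, once wired up this command would be invoked along these lines; the exact executable/group prefix depends on how the AYON launcher exposes addon CLI groups, so treat it as an assumption rather than a documented command:

    ayon addon applications launch --app maya/2024 --project MyProject --folder /assets/characters/hero --task modeling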
+ + """ + self.launch_application(app, project, folder, task) diff --git a/server_addon/applications/client/ayon_applications/utils.py b/server_addon/applications/client/ayon_applications/utils.py index 234fa6c683..185779a949 100644 --- a/server_addon/applications/client/ayon_applications/utils.py +++ b/server_addon/applications/client/ayon_applications/utils.py @@ -281,13 +281,20 @@ def prepare_app_environments( app.environment ] + task_entity = data.get("task_entity") folder_entity = data.get("folder_entity") # Add tools environments groups_by_name = {} tool_by_group_name = collections.defaultdict(dict) - if folder_entity: - # Make sure each tool group can be added only once - for key in folder_entity["attrib"].get("tools") or []: + tools = None + if task_entity: + tools = task_entity["attrib"].get("tools") + + if tools is None and folder_entity: + tools = folder_entity["attrib"].get("tools") + + if tools: + for key in tools: tool = app.manager.tools.get(key) if not tool or not tool.is_valid_for_app(app): continue diff --git a/server_addon/applications/package.py b/server_addon/applications/package.py index ce312ed662..983749355e 100644 --- a/server_addon/applications/package.py +++ b/server_addon/applications/package.py @@ -1,3 +1,10 @@ name = "applications" title = "Applications" -version = "0.2.0" +version = "0.2.2" + +ayon_server_version = ">=1.0.7" +ayon_launcher_version = ">=1.0.2" +ayon_required_addons = { + "core": ">0.3.0", +} +ayon_compatible_addons = {} diff --git a/server_addon/applications/server/applications.json b/server_addon/applications/server/applications.json index e4b72fdff9..84b7fa33cf 100644 --- a/server_addon/applications/server/applications.json +++ b/server_addon/applications/server/applications.json @@ -1271,6 +1271,28 @@ } ] }, + "equalizer": { + "enabled": true, + "label": "3DEqualizer", + "icon": "{}/app_icons/3de4.png", + "host_name": "equalizer", + "environment": "{}", + "variants": [ + { + "name": "7-1v2", + "label": "7.1v2", + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\3DE4_win64_r7.1v2\\bin\\3DE4.exe" + ], + "darwin": [], + "linux": [] + }, + "environment": "{}" + } + ] + }, "additional_apps": [] } } diff --git a/server_addon/applications/server/settings.py b/server_addon/applications/server/settings.py index 5743e9f471..b77686cee0 100644 --- a/server_addon/applications/server/settings.py +++ b/server_addon/applications/server/settings.py @@ -190,6 +190,8 @@ class ApplicationsSettings(BaseSettingsModel): default_factory=AppGroupWithPython, title="OpenRV") zbrush: AppGroup = SettingsField( default_factory=AppGroupWithPython, title="Zbrush") + equalizer: AppGroup = SettingsField( + default_factory=AppGroupWithPython, title="3DEqualizer") additional_apps: list[AdditionalAppGroup] = SettingsField( default_factory=list, title="Additional Applications") diff --git a/server_addon/blender/package.py b/server_addon/blender/package.py new file mode 100644 index 0000000000..667076e533 --- /dev/null +++ b/server_addon/blender/package.py @@ -0,0 +1,3 @@ +name = "blender" +title = "Blender" +version = "0.1.8" diff --git a/server_addon/blender/server/__init__.py b/server_addon/blender/server/__init__.py index a7d6cb4400..b274e3bc29 100644 --- a/server_addon/blender/server/__init__.py +++ b/server_addon/blender/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import BlenderSettings, DEFAULT_VALUES class 
BlenderAddon(BaseServerAddon): - name = "blender" - title = "Blender" - version = __version__ settings_model: Type[BlenderSettings] = BlenderSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/blender/server/version.py b/server_addon/blender/server/version.py deleted file mode 100644 index 9cb17e7976..0000000000 --- a/server_addon/blender/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.8" diff --git a/server_addon/celaction/package.py b/server_addon/celaction/package.py new file mode 100644 index 0000000000..2b11a8630f --- /dev/null +++ b/server_addon/celaction/package.py @@ -0,0 +1,3 @@ +name = "celaction" +title = "CelAction" +version = "0.1.0" diff --git a/server_addon/celaction/server/__init__.py b/server_addon/celaction/server/__init__.py index 90d3dbaa01..e3769a4b7f 100644 --- a/server_addon/celaction/server/__init__.py +++ b/server_addon/celaction/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import CelActionSettings, DEFAULT_VALUES class CelActionAddon(BaseServerAddon): - name = "celaction" - title = "CelAction" - version = __version__ settings_model: Type[CelActionSettings] = CelActionSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/celaction/server/version.py b/server_addon/celaction/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/celaction/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/clockify/package.py b/server_addon/clockify/package.py new file mode 100644 index 0000000000..bcf9425b3f --- /dev/null +++ b/server_addon/clockify/package.py @@ -0,0 +1,3 @@ +name = "clockify" +title = "Clockify" +version = "0.1.1" diff --git a/server_addon/clockify/server/__init__.py b/server_addon/clockify/server/__init__.py index 0fa453fdf4..11bbfed261 100644 --- a/server_addon/clockify/server/__init__.py +++ b/server_addon/clockify/server/__init__.py @@ -2,14 +2,8 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import ClockifySettings class ClockifyAddon(BaseServerAddon): - name = "clockify" - title = "Clockify" - version = __version__ settings_model: Type[ClockifySettings] = ClockifySettings - frontend_scopes = {} - services = {} diff --git a/server_addon/clockify/server/version.py b/server_addon/clockify/server/version.py deleted file mode 100644 index 485f44ac21..0000000000 --- a/server_addon/clockify/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.1" diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py index bfd601af07..749077d2a8 100644 --- a/server_addon/create_ayon_addons.py +++ b/server_addon/create_ayon_addons.py @@ -5,7 +5,7 @@ import shutil import argparse import zipfile import types -import importlib +import importlib.machinery import platform import collections from pathlib import Path @@ -47,7 +47,7 @@ plugin_for = ["ayon_server"] """ CLIENT_VERSION_CONTENT = '''# -*- coding: utf-8 -*- -"""Package declaring AYON core addon version.""" +"""Package declaring AYON addon '{}' version.""" __version__ = "{}" ''' @@ -183,6 +183,7 @@ def create_addon_zip( def prepare_client_code( + addon_name: str, addon_dir: Path, addon_output_dir: 
Path, addon_version: str @@ -211,7 +212,9 @@ def prepare_client_code( version_path = subpath / "version.py" if version_path.exists(): with open(version_path, "w") as stream: - stream.write(CLIENT_VERSION_CONTENT.format(addon_version)) + stream.write( + CLIENT_VERSION_CONTENT.format(addon_name, addon_version) + ) zip_filepath = private_dir / "client.zip" with ZipFileLongPaths(zip_filepath, "w", zipfile.ZIP_DEFLATED) as zipf: @@ -245,12 +248,8 @@ def create_addon_package( keep_source: bool, ): src_package_py = addon_dir / "package.py" - package = None - if src_package_py.exists(): - package = import_filepath(src_package_py) - addon_version = package.version - else: - addon_version = get_addon_version(addon_dir) + package = import_filepath(src_package_py) + addon_version = package.version addon_output_dir = output_dir / addon_dir.name / addon_version if addon_output_dir.exists(): @@ -259,25 +258,16 @@ def create_addon_package( # Copy server content dst_package_py = addon_output_dir / "package.py" - if package is not None: - shutil.copy(src_package_py, dst_package_py) - else: - addon_name = addon_dir.name - if addon_name == "royal_render": - addon_name = "royalrender" - package_py_content = PACKAGE_PY_TEMPLATE.format( - addon_name=addon_name, addon_version=addon_version - ) - - with open(dst_package_py, "w+") as pkg_py: - pkg_py.write(package_py_content) + shutil.copy(src_package_py, dst_package_py) server_dir = addon_dir / "server" shutil.copytree( server_dir, addon_output_dir / "server", dirs_exist_ok=True ) - prepare_client_code(addon_dir, addon_output_dir, addon_version) + prepare_client_code( + package.name, addon_dir, addon_output_dir, addon_version + ) if create_zip: create_addon_zip( diff --git a/server_addon/deadline/package.py b/server_addon/deadline/package.py new file mode 100644 index 0000000000..e26734c813 --- /dev/null +++ b/server_addon/deadline/package.py @@ -0,0 +1,3 @@ +name = "deadline" +title = "Deadline" +version = "0.1.12" diff --git a/server_addon/deadline/server/__init__.py b/server_addon/deadline/server/__init__.py index 36d04189a9..8d2dc152cd 100644 --- a/server_addon/deadline/server/__init__.py +++ b/server_addon/deadline/server/__init__.py @@ -2,15 +2,13 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ -from .settings import DeadlineSettings, DEFAULT_VALUES +from .settings import DeadlineSettings, DEFAULT_VALUES, DeadlineSiteSettings class Deadline(BaseServerAddon): - name = "deadline" - title = "Deadline" - version = __version__ settings_model: Type[DeadlineSettings] = DeadlineSettings + site_settings_model: Type[DeadlineSiteSettings] = DeadlineSiteSettings + async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/deadline/server/settings/__init__.py b/server_addon/deadline/server/settings/__init__.py index 0307862afa..d25c0fb330 100644 --- a/server_addon/deadline/server/settings/__init__.py +++ b/server_addon/deadline/server/settings/__init__.py @@ -2,9 +2,11 @@ from .main import ( DeadlineSettings, DEFAULT_VALUES, ) +from .site_settings import DeadlineSiteSettings __all__ = ( "DeadlineSettings", + "DeadlineSiteSettings", "DEFAULT_VALUES", ) diff --git a/server_addon/deadline/server/settings/main.py b/server_addon/deadline/server/settings/main.py index 83c7567c0d..47ad72a86f 100644 --- a/server_addon/deadline/server/settings/main.py +++ b/server_addon/deadline/server/settings/main.py @@ -15,14 +15,8 @@ from .publish_plugins import ( ) -class 
ServerListSubmodel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Name") - value: str = SettingsField(title="Value") - - async def defined_deadline_ws_name_enum_resolver( - addon: BaseServerAddon, + addon: "BaseServerAddon", settings_variant: str = "production", project_name: str | None = None, ) -> list[str]: @@ -32,25 +26,39 @@ async def defined_deadline_ws_name_enum_resolver( settings = await addon.get_studio_settings(variant=settings_variant) - ws_urls = [] + ws_server_name = [] for deadline_url_item in settings.deadline_urls: - ws_urls.append(deadline_url_item.name) + ws_server_name.append(deadline_url_item.name) - return ws_urls + return ws_server_name + +class ServerItemSubmodel(BaseSettingsModel): + """Connection info about configured DL servers.""" + _layout = "compact" + name: str = SettingsField(title="Name") + value: str = SettingsField(title="Url") + require_authentication: bool = SettingsField( + False, title="Require authentication") + not_verify_ssl: bool = SettingsField( + False, title="Don't verify SSL") class DeadlineSettings(BaseSettingsModel): - deadline_urls: list[ServerListSubmodel] = SettingsField( + # configured DL servers + deadline_urls: list[ServerItemSubmodel] = SettingsField( default_factory=list, - title="System Deadline Webservice URLs", + title="System Deadline Webservice Info", scope=["studio"], ) + + # name(key) of selected server for project deadline_server: str = SettingsField( - title="Project deadline server", + title="Project Deadline server name", section="---", scope=["project"], enum_resolver=defined_deadline_ws_name_enum_resolver ) + publish: PublishPluginsModel = SettingsField( default_factory=PublishPluginsModel, title="Publish Plugins", @@ -62,11 +70,14 @@ class DeadlineSettings(BaseSettingsModel): return value + DEFAULT_VALUES = { "deadline_urls": [ { "name": "default", - "value": "http://127.0.0.1:8082" + "value": "http://127.0.0.1:8082", + "require_authentication": False, + "not_verify_ssl": False } ], "deadline_server": "default", diff --git a/server_addon/deadline/server/settings/publish_plugins.py b/server_addon/deadline/server/settings/publish_plugins.py index 9f69143e37..784ad2560b 100644 --- a/server_addon/deadline/server/settings/publish_plugins.py +++ b/server_addon/deadline/server/settings/publish_plugins.py @@ -191,7 +191,6 @@ class NukeSubmitDeadlineModel(BaseSettingsModel): @validator( "limit_groups", - "env_allowed_keys", "env_search_replace_values") def validate_unique_names(cls, value): ensure_unique_names(value) diff --git a/server_addon/deadline/server/settings/site_settings.py b/server_addon/deadline/server/settings/site_settings.py new file mode 100644 index 0000000000..a77a6edc7e --- /dev/null +++ b/server_addon/deadline/server/settings/site_settings.py @@ -0,0 +1,26 @@ +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, +) +from .main import defined_deadline_ws_name_enum_resolver + + +class CredentialPerServerModel(BaseSettingsModel): + """Provide credentials for configured DL servers""" + _layout = "expanded" + server_name: str = SettingsField("", + title="DL server name", + enum_resolver=defined_deadline_ws_name_enum_resolver) + username: str = SettingsField("", + title="Username") + password: str = SettingsField("", + title="Password") + + +class DeadlineSiteSettings(BaseSettingsModel): + local_settings: list[CredentialPerServerModel] = SettingsField( + default_factory=list, + title="Local setting", + description="Please provide credentials for configured Deadline 
servers", + ) + diff --git a/server_addon/flame/package.py b/server_addon/flame/package.py new file mode 100644 index 0000000000..8c077ed91d --- /dev/null +++ b/server_addon/flame/package.py @@ -0,0 +1,3 @@ +name = "flame" +title = "Flame" +version = "0.1.0" diff --git a/server_addon/flame/server/__init__.py b/server_addon/flame/server/__init__.py index 7d5eb3960f..4aa46617ee 100644 --- a/server_addon/flame/server/__init__.py +++ b/server_addon/flame/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import FlameSettings, DEFAULT_VALUES class FlameAddon(BaseServerAddon): - name = "flame" - title = "Flame" - version = __version__ settings_model: Type[FlameSettings] = FlameSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/flame/server/version.py b/server_addon/flame/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/flame/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/fusion/package.py b/server_addon/fusion/package.py new file mode 100644 index 0000000000..9e7a46df2c --- /dev/null +++ b/server_addon/fusion/package.py @@ -0,0 +1,3 @@ +name = "fusion" +title = "Fusion" +version = "0.1.5" diff --git a/server_addon/fusion/server/__init__.py b/server_addon/fusion/server/__init__.py index 4d43f28812..0456cfd5ee 100644 --- a/server_addon/fusion/server/__init__.py +++ b/server_addon/fusion/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import FusionSettings, DEFAULT_VALUES class FusionAddon(BaseServerAddon): - name = "fusion" - title = "Fusion" - version = __version__ settings_model: Type[FusionSettings] = FusionSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/fusion/server/version.py b/server_addon/fusion/server/version.py deleted file mode 100644 index 1276d0254f..0000000000 --- a/server_addon/fusion/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.5" diff --git a/server_addon/harmony/package.py b/server_addon/harmony/package.py new file mode 100644 index 0000000000..00824cedef --- /dev/null +++ b/server_addon/harmony/package.py @@ -0,0 +1,3 @@ +name = "harmony" +title = "Harmony" +version = "0.1.3" diff --git a/server_addon/harmony/server/__init__.py b/server_addon/harmony/server/__init__.py index 4ecda1989e..154618241e 100644 --- a/server_addon/harmony/server/__init__.py +++ b/server_addon/harmony/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import HarmonySettings, DEFAULT_HARMONY_SETTING -from .version import __version__ class Harmony(BaseServerAddon): - name = "harmony" - title = "Harmony" - version = __version__ - settings_model = HarmonySettings async def get_default_settings(self): diff --git a/server_addon/harmony/server/settings/main.py b/server_addon/harmony/server/settings/main.py index 9c780b63c2..8a72c966d8 100644 --- a/server_addon/harmony/server/settings/main.py +++ b/server_addon/harmony/server/settings/main.py @@ -45,11 +45,6 @@ DEFAULT_HARMONY_SETTING = { "optional": True, "active": True }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, 
"ValidateSceneSettings": { "enabled": True, "optional": True, diff --git a/server_addon/harmony/server/settings/publish_plugins.py b/server_addon/harmony/server/settings/publish_plugins.py index c9e7c515e4..2d976389f6 100644 --- a/server_addon/harmony/server/settings/publish_plugins.py +++ b/server_addon/harmony/server/settings/publish_plugins.py @@ -18,14 +18,6 @@ class ValidateAudioPlugin(BaseSettingsModel): active: bool = SettingsField(True, title="Active") -class ValidateContainersPlugin(BaseSettingsModel): - """Check if loaded container is scene are latest versions.""" - _isGroup = True - enabled: bool = True - optional: bool = SettingsField(False, title="Optional") - active: bool = SettingsField(True, title="Active") - - class ValidateSceneSettingsPlugin(BaseSettingsModel): """Validate if FrameStart, FrameEnd and Resolution match shot data in DB. Use regular expressions to limit validations only on particular asset @@ -63,11 +55,6 @@ class HarmonyPublishPlugins(BaseSettingsModel): default_factory=ValidateAudioPlugin, ) - ValidateContainers: ValidateContainersPlugin = SettingsField( - title="Validate Containers", - default_factory=ValidateContainersPlugin, - ) - ValidateSceneSettings: ValidateSceneSettingsPlugin = SettingsField( title="Validate Scene Settings", default_factory=ValidateSceneSettingsPlugin, diff --git a/server_addon/harmony/server/version.py b/server_addon/harmony/server/version.py deleted file mode 100644 index df0c92f1e2..0000000000 --- a/server_addon/harmony/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.2" diff --git a/server_addon/hiero/package.py b/server_addon/hiero/package.py new file mode 100644 index 0000000000..54c2f74fa7 --- /dev/null +++ b/server_addon/hiero/package.py @@ -0,0 +1,3 @@ +name = "hiero" +title = "Hiero" +version = "0.1.3" diff --git a/server_addon/hiero/server/__init__.py b/server_addon/hiero/server/__init__.py index d0f9bcefc3..3db78eafd7 100644 --- a/server_addon/hiero/server/__init__.py +++ b/server_addon/hiero/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import HieroSettings, DEFAULT_VALUES class HieroAddon(BaseServerAddon): - name = "hiero" - title = "Hiero" - version = __version__ settings_model: Type[HieroSettings] = HieroSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/hiero/server/settings/imageio.py b/server_addon/hiero/server/settings/imageio.py index f2bc71ac33..9e15e15597 100644 --- a/server_addon/hiero/server/settings/imageio.py +++ b/server_addon/hiero/server/settings/imageio.py @@ -149,15 +149,15 @@ class ImageIOSettings(BaseSettingsModel): DEFAULT_IMAGEIO_SETTINGS = { "workfile": { - "ocioConfigName": "nuke-default", - "workingSpace": "linear", - "viewerLut": "sRGB", - "eightBitLut": "sRGB", - "sixteenBitLut": "sRGB", - "logLut": "Cineon", - "floatLut": "linear", - "thumbnailLut": "sRGB", - "monitorOutLut": "sRGB" + "ocioConfigName": "aces_1.2", + "workingSpace": "role_scene_linear", + "viewerLut": "ACES/sRGB", + "eightBitLut": "role_matte_paint", + "sixteenBitLut": "role_texture_paint", + "logLut": "role_compositing_log", + "floatLut": "role_scene_linear", + "thumbnailLut": "ACES/sRGB", + "monitorOutLut": "ACES/sRGB" }, "regexInputs": { "inputs": [ diff --git a/server_addon/hiero/server/version.py 
b/server_addon/hiero/server/version.py deleted file mode 100644 index b3f4756216..0000000000 --- a/server_addon/hiero/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.2" diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py new file mode 100644 index 0000000000..06b034da38 --- /dev/null +++ b/server_addon/houdini/package.py @@ -0,0 +1,3 @@ +name = "houdini" +title = "Houdini" +version = "0.2.15" diff --git a/server_addon/houdini/server/__init__.py b/server_addon/houdini/server/__init__.py index 870ec2d0b7..8c1ffcb0b3 100644 --- a/server_addon/houdini/server/__init__.py +++ b/server_addon/houdini/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import HoudiniSettings, DEFAULT_VALUES class Houdini(BaseServerAddon): - name = "houdini" - title = "Houdini" - version = __version__ settings_model: Type[HoudiniSettings] = HoudiniSettings async def get_default_settings(self): diff --git a/server_addon/houdini/server/settings/imageio.py b/server_addon/houdini/server/settings/imageio.py index f4850c5df7..c4f4813d51 100644 --- a/server_addon/houdini/server/settings/imageio.py +++ b/server_addon/houdini/server/settings/imageio.py @@ -34,6 +34,34 @@ class ImageIOFileRulesModel(BaseSettingsModel): return value +class WorkfileImageIOModel(BaseSettingsModel): + """Workfile settings help. + + Empty values will be skipped, allowing any existing env vars to + pass through as defined. + + Note: The render space in Houdini is + always set to the 'scene_linear' role.""" + + enabled: bool = SettingsField(False, title="Enabled") + default_display: str = SettingsField( + title="Default active displays", + description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var:" + " a colon-separated list of displays, e.g. ACES:P3" + ) + default_view: str = SettingsField( + title="Default active views", + description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var:" + " a colon-separated list of views, e.g. sRGB:DCDM" + ) + review_color_space: str = SettingsField( + title="Review colorspace", + description="It exposes the OCIO Colorspace parameter on OpenGL nodes. " + "If left empty, AYON will figure out the default " + "colorspace using your default display and default view."
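As the field descriptions above note, the two "default active" values mirror the standard OCIO environment variables; a minimal sketch of how a launch hook could apply them while honoring the "empty values are skipped" rule (function and argument names are illustrative, not this addon's actual code):

    def apply_workfile_imageio(workfile: dict, env: dict) -> None:
        # Empty values are skipped so existing env vars pass through untouched.
        if not workfile["enabled"]:
            return
        if workfile["default_display"]:
            env["OCIO_ACTIVE_DISPLAYS"] = workfile["default_display"]  # e.g. "ACES:P3"
        if workfile["default_view"]:
            env["OCIO_ACTIVE_VIEWS"] = workfile["default_view"]  # e.g. "sRGB:DCDM"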
+ ) + + class HoudiniImageIOModel(BaseSettingsModel): activate_host_color_management: bool = SettingsField( True, title="Enable Color Management" @@ -46,3 +74,26 @@ class HoudiniImageIOModel(BaseSettingsModel): default_factory=ImageIOFileRulesModel, title="File Rules" ) + workfile: WorkfileImageIOModel = SettingsField( + default_factory=WorkfileImageIOModel, + title="Workfile" + ) + + +DEFAULT_IMAGEIO_SETTINGS = { + "activate_host_color_management": False, + "ocio_config": { + "override_global_config": False, + "filepath": [] + }, + "file_rules": { + "activate_host_rules": False, + "rules": [] + }, + "workfile": { + "enabled": False, + "default_display": "ACES", + "default_view": "sRGB", + "review_color_space": "" + } +} diff --git a/server_addon/houdini/server/settings/main.py b/server_addon/houdini/server/settings/main.py index cbb19d15b7..3acab0ce74 100644 --- a/server_addon/houdini/server/settings/main.py +++ b/server_addon/houdini/server/settings/main.py @@ -3,7 +3,10 @@ from .general import ( GeneralSettingsModel, DEFAULT_GENERAL_SETTINGS ) -from .imageio import HoudiniImageIOModel +from .imageio import ( + HoudiniImageIOModel, + DEFAULT_IMAGEIO_SETTINGS +) from .shelves import ShelvesModel from .create import ( CreatePluginsModel, @@ -40,6 +43,7 @@ class HoudiniSettings(BaseSettingsModel): DEFAULT_VALUES = { "general": DEFAULT_GENERAL_SETTINGS, + "imageio": DEFAULT_IMAGEIO_SETTINGS, "shelves": [], "create": DEFAULT_HOUDINI_CREATE_SETTINGS, "publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py index 8e0e7f7795..4a0c022f23 100644 --- a/server_addon/houdini/server/settings/publish.py +++ b/server_addon/houdini/server/settings/publish.py @@ -1,4 +1,7 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField +) # Publish Plugins @@ -20,6 +23,27 @@ class CollectChunkSizeModel(BaseSettingsModel): title="Frames Per Task") +class AOVFilterSubmodel(BaseSettingsModel): + """You should use the same host name you are using for Houdini.""" + host_name: str = SettingsField("", title="Houdini Host name") + value: list[str] = SettingsField( + default_factory=list, + title="AOV regex" + ) + +class CollectLocalRenderInstancesModel(BaseSettingsModel): + + use_deadline_aov_filter: bool = SettingsField( + False, + title="Use Deadline AOV Filter" + ) + + aov_filter: AOVFilterSubmodel = SettingsField( + default_factory=AOVFilterSubmodel, + title="Reviewable products filter" + ) + + class ValidateWorkfilePathsModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") @@ -49,10 +73,10 @@ class PublishPluginsModel(BaseSettingsModel): default_factory=CollectChunkSizeModel, title="Collect Chunk Size." ) - ValidateContainers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Latest Containers.", - section="Validators") + CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField( + default_factory=CollectLocalRenderInstancesModel, + title="Collect Local Render Instances." 
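The AOV filter above holds regular expressions matched against render product (AOV) names to decide which locally rendered outputs get a review representation; a small illustration (the helper name is mine):

    import re

    def is_reviewable(aov_name: str, patterns: list[str]) -> bool:
        # An AOV is reviewable when any configured pattern matches its name.
        return any(re.search(pattern, aov_name) for pattern in patterns)

    is_reviewable("karma_beauty", [".*([Bb]eauty).*"])   # True
    is_reviewable("crypto_object", [".*([Bb]eauty).*"])  # False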
+ ) ValidateInstanceInContextHoudini: BasicValidateModel = SettingsField( default_factory=BasicValidateModel, title="Validate Instance is in same Context.") @@ -82,10 +106,14 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = { "optional": True, "chunk_size": 999999 }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True + "CollectLocalRenderInstances": { + "use_deadline_aov_filter": False, + "aov_filter" : { + "host_name": "houdini", + "value": [ + ".*([Bb]eauty).*" + ] + } }, "ValidateInstanceInContextHoudini": { "enabled": True, diff --git a/server_addon/houdini/server/version.py b/server_addon/houdini/server/version.py deleted file mode 100644 index b5c9b6cb71..0000000000 --- a/server_addon/houdini/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.2.12" diff --git a/server_addon/max/package.py b/server_addon/max/package.py new file mode 100644 index 0000000000..fb1f1b3050 --- /dev/null +++ b/server_addon/max/package.py @@ -0,0 +1,3 @@ +name = "max" +title = "Max" +version = "0.1.7" diff --git a/server_addon/max/server/__init__.py b/server_addon/max/server/__init__.py index 31c694a084..d03b29d249 100644 --- a/server_addon/max/server/__init__.py +++ b/server_addon/max/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import MaxSettings, DEFAULT_VALUES class MaxAddon(BaseServerAddon): - name = "max" - title = "Max" - version = __version__ settings_model: Type[MaxSettings] = MaxSettings async def get_default_settings(self): diff --git a/server_addon/max/server/version.py b/server_addon/max/server/version.py deleted file mode 100644 index f1380eede2..0000000000 --- a/server_addon/max/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.7" diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py new file mode 100644 index 0000000000..4537c23eaa --- /dev/null +++ b/server_addon/maya/package.py @@ -0,0 +1,3 @@ +name = "maya" +title = "Maya" +version = "0.1.20" diff --git a/server_addon/maya/server/__init__.py b/server_addon/maya/server/__init__.py index 8784427dcf..6dda2cdd77 100644 --- a/server_addon/maya/server/__init__.py +++ b/server_addon/maya/server/__init__.py @@ -2,13 +2,9 @@ from ayon_server.addons import BaseServerAddon from .settings.main import MayaSettings, DEFAULT_MAYA_SETTING -from .version import __version__ class MayaAddon(BaseServerAddon): - name = "maya" - title = "Maya" - version = __version__ settings_model = MayaSettings async def get_default_settings(self): diff --git a/server_addon/maya/server/settings/loaders.py b/server_addon/maya/server/settings/loaders.py index f59711b1e6..2f104d2858 100644 --- a/server_addon/maya/server/settings/loaders.py +++ b/server_addon/maya/server/settings/loaders.py @@ -103,6 +103,17 @@ class ImportLoaderModel(BaseSettingsModel): group_name: str = SettingsField(title="Group name") +class YetiRigLoaderModel(LoaderEnabledModel): + create_cache_instance_on_load: bool = SettingsField( + title="Create Yeti Cache instance on load", + description=( + "When enabled, upon loading a Yeti Rig product a new Yeti cache " + "instance is automatically created as preparation to publishing " + "the output directly." 
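The toggle described above implies load-time behavior roughly like the following; this is a speculative sketch of the client side, which this diff does not show (all names here are assumptions):

    def on_yeti_rig_loaded(context, settings, create_instance):
        options = settings["maya"]["load"]["YetiRigLoader"]  # settings path assumed
        if options["create_cache_instance_on_load"]:
            # Pre-create a publishable yeticache instance for the loaded rig.
            create_instance(product_type="yeticache", source_container=context)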
+ ) + ) + + class LoadersModel(BaseSettingsModel): colors: ColorsSetting = SettingsField( default_factory=ColorsSetting, @@ -195,8 +206,8 @@ class LoadersModel(BaseSettingsModel): default_factory=LoaderEnabledModel, title="Yeti Cache Loader" ) - YetiRigLoader: LoaderEnabledModel = SettingsField( - default_factory=LoaderEnabledModel, + YetiRigLoader: YetiRigLoaderModel = SettingsField( + default_factory=YetiRigLoaderModel, title="Yeti Rig Loader" ) @@ -266,5 +277,8 @@ DEFAULT_LOADERS_SETTING = { "VRaySceneLoader": {"enabled": True}, "XgenLoader": {"enabled": True}, "YetiCacheLoader": {"enabled": True}, - "YetiRigLoader": {"enabled": True}, + "YetiRigLoader": { + "enabled": True, + "create_cache_instance_on_load": True + }, } diff --git a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py index 27288053a2..9c552e17fa 100644 --- a/server_addon/maya/server/settings/publishers.py +++ b/server_addon/maya/server/settings/publishers.py @@ -35,6 +35,50 @@ def angular_unit_enum(): ] +def extract_alembic_data_format_enum(): + return [ + {"label": "ogawa", "value": "ogawa"}, + {"label": "HDF", "value": "HDF"} + ] + + +def extract_alembic_overrides_enum(): + return [ + {"label": "Custom Attributes", "value": "attr"}, + {"label": "Custom Attributes Prefix", "value": "attrPrefix"}, + {"label": "Data Format", "value": "dataFormat"}, + {"label": "Euler Filter", "value": "eulerFilter"}, + {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"}, + {"label": "Mel Post Job Callback", "value": "melPostJobCallback"}, + {"label": "Pre Roll", "value": "preRoll"}, + {"label": "Pre Roll Start Frame", "value": "preRollStartFrame"}, + { + "label": "Python Per Frame Callback", + "value": "pythonPerFrameCallback" + }, + { + "label": "Python Post Job Callback", + "value": "pythonPostJobCallback" + }, + {"label": "Renderable Only", "value": "renderableOnly"}, + {"label": "Strip Namespaces", "value": "stripNamespaces"}, + {"label": "User Attr", "value": "userAttr"}, + {"label": "User Attr Prefix", "value": "userAttrPrefix"}, + {"label": "UV Write", "value": "uvWrite"}, + {"label": "UVs Only", "value": "uvsOnly"}, + {"label": "Verbose", "value": "verbose"}, + {"label": "Visible Only", "value": "visibleOnly"}, + {"label": "Whole Frame Geo", "value": "wholeFrameGeo"}, + {"label": "World Space", "value": "worldSpace"}, + {"label": "Write Color Sets", "value": "writeColorSets"}, + {"label": "Write Creases", "value": "writeCreases"}, + {"label": "Write Face Sets", "value": "writeFaceSets"}, + {"label": "Write Normals", "value": "writeNormals"}, + {"label": "Write UV Sets", "value": "writeUVSets"}, + {"label": "Write Visibility", "value": "writeVisibility"} + ] + + class BasicValidateModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") @@ -184,7 +228,7 @@ class ValidateAttributesModel(BaseSettingsModel): if not success: raise BadRequestException( - "The attibutes can't be parsed as json object" + "The attributes can't be parsed as json object" ) return value @@ -220,7 +264,7 @@ class ValidateUnrealStaticMeshNameModel(BaseSettingsModel): enabled: bool = SettingsField(title="ValidateUnrealStaticMeshName") optional: bool = SettingsField(title="Optional") validate_mesh: bool = SettingsField(title="Validate mesh names") - validate_collision: bool = SettingsField(title="Validate collison names") + validate_collision: bool = SettingsField(title="Validate collision names") class 
ValidateCycleErrorModel(BaseSettingsModel): @@ -243,7 +287,7 @@ class ValidatePluginPathAttributesModel(BaseSettingsModel): and the node attribute is abc_file """ - enabled: bool = True + enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") active: bool = SettingsField(title="Active") attribute: list[ValidatePluginPathAttributesAttrModel] = SettingsField( @@ -265,6 +309,9 @@ class RendererAttributesModel(BaseSettingsModel): class ValidateRenderSettingsModel(BaseSettingsModel): + enabled: bool = SettingsField(title="Enabled") + optional: bool = SettingsField(title="Optional") + active: bool = SettingsField(title="Active") arnold_render_attributes: list[RendererAttributesModel] = SettingsField( default_factory=list, title="Arnold Render Attributes") vray_render_attributes: list[RendererAttributesModel] = SettingsField( @@ -299,6 +346,108 @@ class ExtractAlembicModel(BaseSettingsModel): families: list[str] = SettingsField( default_factory=list, title="Families") + eulerFilter: bool = SettingsField( + title="Euler Filter", + description="Apply Euler filter while sampling rotations." + ) + renderableOnly: bool = SettingsField( + title="Renderable Only", + description="Only export renderable visible shapes." + ) + stripNamespaces: bool = SettingsField( + title="Strip Namespaces", + description=( + "Namespaces will be stripped off of the node before being written " + "to Alembic." + ) + ) + uvsOnly: bool = SettingsField( + title="UVs Only", + description=( + "If this flag is present, only uv data for PolyMesh and SubD " + "shapes will be written to the Alembic file." + ) + ) + uvWrite: bool = SettingsField( + title="UV Write", + description=( + "Uv data for PolyMesh and SubD shapes will be written to the " + "Alembic file." + ) + ) + verbose: bool = SettingsField( + title="Verbose", + description="Prints the current frame that is being evaluated." + ) + visibleOnly: bool = SettingsField( + title="Visible Only", + description="Only export dag objects visible during frame range." + ) + wholeFrameGeo: bool = SettingsField( + title="Whole Frame Geo", + description=( + "Data for geometry will only be written out on whole frames." + ) + ) + worldSpace: bool = SettingsField( + title="World Space", + description="Any root nodes will be stored in world space." + ) + writeColorSets: bool = SettingsField( + title="Write Color Sets", + description="Write vertex colors with the geometry." + ) + writeCreases: bool = SettingsField( + title="Write Creases", + description="Write the geometry's edge and vertex crease information." + ) + writeFaceSets: bool = SettingsField( + title="Write Face Sets", + description="Write face sets with the geometry." + ) + writeNormals: bool = SettingsField( + title="Write Normals", + description="Write normals with the deforming geometry." + ) + writeUVSets: bool = SettingsField( + title="Write UV Sets", + description=( + "Write all uv sets on MFnMeshes as vector 2 indexed geometry " + "parameters with face varying scope." + ) + ) + writeVisibility: bool = SettingsField( + title="Write Visibility", + description=( + "Visibility state will be stored in the Alembic file. Otherwise " + "everything written out is treated as visible." + ) + ) + preRoll: bool = SettingsField( + title="Pre Roll", + description=( + "When enabled, the pre roll start frame is used to begin the " + "evaluation of the mesh. 
From the pre roll start frame to the " + "alembic start frame, will not be written to disk. This can be " + "used for simulation run up." + ) + ) + preRollStartFrame: int = SettingsField( + title="Pre Roll Start Frame", + description=( + "The frame to start scene evaluation at. This is used to set the " + "starting frame for time dependent translations and can be used to" + " evaluate run-up that isn't actually translated.\n" + "NOTE: Pre Roll needs to be enabled for this start frame " + "to be considered." + ) + ) + dataFormat: str = SettingsField( + enum_resolver=extract_alembic_data_format_enum, + title="Data Format", + description="The data format to use to write the file." + ) bake_attributes: list[str] = SettingsField( default_factory=list, title="Bake Attributes", description="List of attributes that will be included in the alembic " @@ -309,6 +458,73 @@ class ExtractAlembicModel(BaseSettingsModel): description="List of attribute prefixes for attributes that will be " "included in the alembic export.", ) + attr: str = SettingsField( + title="Custom Attributes", + placeholder="attr1;attr2", + description=( + "Attributes matching by name will be included in the Alembic " + "export. Attributes should be separated by semi-colon `;`" + ) + ) + attrPrefix: str = SettingsField( + title="Custom Attributes Prefix", + placeholder="prefix1;prefix2", + description=( + "Attributes starting with these prefixes will be included in the " + "Alembic export. Attributes should be separated by semi-colon `;`" + ) + ) + userAttr: str = SettingsField( + title="User Attr", + placeholder="attr1;attr2", + description=( + "Attributes matching by name will be included in the Alembic " + "export. Attributes should be separated by semi-colon `;`" + ) + ) + userAttrPrefix: str = SettingsField( + title="User Attr Prefix", + placeholder="prefix1;prefix2", + description=( + "Attributes starting with these prefixes will be included in the " + "Alembic export. Attributes should be separated by semi-colon `;`" + ) + ) + melPerFrameCallback: str = SettingsField( + title="Mel Per Frame Callback", + description=( + "When each frame (and the static frame) is evaluated the string " + "specified is evaluated as a Mel command." + ) + ) + melPostJobCallback: str = SettingsField( + title="Mel Post Job Callback", + description=( + "When the translation has finished the string specified is " + "evaluated as a Mel command." + ) + ) + pythonPerFrameCallback: str = SettingsField( + title="Python Per Frame Callback", + description=( + "When each frame (and the static frame) is evaluated the string " + "specified is evaluated as a python command." + ) + ) + pythonPostJobCallback: str = SettingsField( + title="Python Post Job Callback", + description=( + "When the translation has finished the string specified is " + "evaluated as a python command." + ) + ) + overrides: list[str] = SettingsField( + enum_resolver=extract_alembic_overrides_enum, + title="Exposed Overrides", + description=( + "Expose the attribute in this list to the user when publishing." 
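Several attribute fields above take semicolon-separated values; before being handed to the Alembic export job they are split into plain lists, roughly like this (the helper name is mine, not from the addon):

    def parse_attr_field(value: str) -> list[str]:
        # "attr1;attr2" -> ["attr1", "attr2"]; blank entries are dropped.
        return [attr.strip() for attr in value.split(";") if attr.strip()]

    parse_attr_field("cbId;myCustomAttr")  # ["cbId", "myCustomAttr"]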
+ ) + ) class ExtractObjModel(BaseSettingsModel): @@ -392,7 +608,7 @@ class ExtractGPUCacheModel(BaseSettingsModel): title="Optimize Animations For Motion Blur" ) writeMaterials: bool = SettingsField(title="Write Materials") - useBaseTessellation: bool = SettingsField(title="User Base Tesselation") + useBaseTessellation: bool = SettingsField(title="User Based Tessellation") class PublishersModel(BaseSettingsModel): @@ -418,10 +634,6 @@ class PublishersModel(BaseSettingsModel): title="Validate Instance In Context", section="Validators" ) - ValidateContainers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Containers" - ) ValidateFrameRange: ValidateFrameRangeModel = SettingsField( default_factory=ValidateFrameRangeModel, title="Validate Frame Range" @@ -668,15 +880,19 @@ class PublishersModel(BaseSettingsModel): default_factory=BasicValidateModel, title="Validate Alembic Visible Node", ) + ValidateAlembicDefaultsPointcache: BasicValidateModel = SettingsField( + default_factory=BasicValidateModel, + title="Validate Alembic Defaults Pointcache" + ) + ValidateAlembicDefaultsAnimation: BasicValidateModel = SettingsField( + default_factory=BasicValidateModel, + title="Validate Alembic Defaults Animation" + ) ExtractProxyAlembic: ExtractProxyAlembicModel = SettingsField( default_factory=ExtractProxyAlembicModel, title="Extract Proxy Alembic", section="Model Extractors", ) - ExtractAlembic: ExtractAlembicModel = SettingsField( - default_factory=ExtractAlembicModel, - title="Extract Alembic", - ) ExtractObj: ExtractObjModel = SettingsField( default_factory=ExtractObjModel, title="Extract OBJ" @@ -697,10 +913,6 @@ class PublishersModel(BaseSettingsModel): default_factory=BasicValidateModel, title="Validate Rig Controllers", ) - ValidateAnimatedReferenceRig: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Animated Reference Rig", - ) ValidateAnimationContent: BasicValidateModel = SettingsField( default_factory=BasicValidateModel, title="Validate Animation Content", @@ -811,6 +1023,10 @@ class PublishersModel(BaseSettingsModel): default_factory=ExtractModelModel, title="Extract Model (Maya Scene)" ) + ExtractAlembic: ExtractAlembicModel = SettingsField( + default_factory=ExtractAlembicModel, + title="Extract Alembic" + ) DEFAULT_SUFFIX_NAMING = { @@ -839,11 +1055,6 @@ DEFAULT_PUBLISH_SETTINGS = { "optional": True, "active": True }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateFrameRange": { "enabled": True, "optional": True, @@ -942,6 +1153,9 @@ DEFAULT_PUBLISH_SETTINGS = { ] }, "ValidateRenderSettings": { + "enabled": True, + "active": True, + "optional": False, "arnold_render_attributes": [], "vray_render_attributes": [], "redshift_render_attributes": [], @@ -1200,16 +1414,6 @@ DEFAULT_PUBLISH_SETTINGS = { "proxyAbc" ] }, - "ExtractAlembic": { - "enabled": True, - "families": [ - "pointcache", - "model", - "vrayproxy.alembic" - ], - "bake_attributes": [], - "bake_attribute_prefixes": [] - }, "ExtractObj": { "enabled": False, "optional": True, @@ -1230,11 +1434,6 @@ DEFAULT_PUBLISH_SETTINGS = { "optional": True, "active": True }, - "ValidateAnimatedReferenceRig": { - "enabled": True, - "optional": False, - "active": True - }, "ValidateAnimationContent": { "enabled": True, "optional": False, @@ -1330,6 +1529,16 @@ DEFAULT_PUBLISH_SETTINGS = { "optional": False, "validate_shapes": True }, + "ValidateAlembicDefaultsPointcache": { + "enabled": True, + "optional": 
True, + "active": True + }, + "ValidateAlembicDefaultsAnimation": { + "enabled": True, + "optional": True, + "active": True + }, "ExtractPlayblast": DEFAULT_PLAYBLAST_SETTING, "ExtractMayaSceneRaw": { "enabled": True, @@ -1371,6 +1580,52 @@ DEFAULT_PUBLISH_SETTINGS = { "ExtractModel": { "enabled": True, "optional": True, - "active": True, + "active": True + }, + "ExtractAlembic": { + "enabled": True, + "families": [ + "pointcache", + "model", + "vrayproxy.alembic" + ], + "attr": "", + "attrPrefix": "", + "bake_attributes": [], + "bake_attribute_prefixes": [], + "dataFormat": "ogawa", + "eulerFilter": False, + "melPerFrameCallback": "", + "melPostJobCallback": "", + "overrides": [ + "attr", + "attrPrefix", + "renderableOnly", + "visibleOnly", + "worldSpace", + "writeColorSets", + "writeFaceSets", + "writeNormals" + ], + "preRoll": False, + "preRollStartFrame": 0, + "pythonPerFrameCallback": "", + "pythonPostJobCallback": "", + "renderableOnly": False, + "stripNamespaces": True, + "uvsOnly": False, + "uvWrite": True, + "userAttr": "", + "userAttrPrefix": "", + "verbose": False, + "visibleOnly": False, + "wholeFrameGeo": False, + "worldSpace": True, + "writeColorSets": False, + "writeCreases": False, + "writeFaceSets": False, + "writeNormals": True, + "writeUVSets": False, + "writeVisibility": False } } diff --git a/server_addon/maya/server/version.py b/server_addon/maya/server/version.py deleted file mode 100644 index 75b463f198..0000000000 --- a/server_addon/maya/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.15" diff --git a/server_addon/nuke/package.py b/server_addon/nuke/package.py new file mode 100644 index 0000000000..bc166bd14e --- /dev/null +++ b/server_addon/nuke/package.py @@ -0,0 +1,3 @@ +name = "nuke" +title = "Nuke" +version = "0.1.13" diff --git a/server_addon/nuke/server/__init__.py b/server_addon/nuke/server/__init__.py index 032ceea5fb..aeb5e36675 100644 --- a/server_addon/nuke/server/__init__.py +++ b/server_addon/nuke/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import NukeSettings, DEFAULT_VALUES class NukeAddon(BaseServerAddon): - name = "nuke" - title = "Nuke" - version = __version__ settings_model: Type[NukeSettings] = NukeSettings async def get_default_settings(self): diff --git a/server_addon/nuke/server/settings/imageio.py b/server_addon/nuke/server/settings/imageio.py index 1b84457133..9cdb0bf1d7 100644 --- a/server_addon/nuke/server/settings/imageio.py +++ b/server_addon/nuke/server/settings/imageio.py @@ -97,8 +97,23 @@ class WorkfileColorspaceSettings(BaseSettingsModel): working_space: str = SettingsField( title="Working Space" ) - thumbnail_space: str = SettingsField( - title="Thumbnail Space" + monitor_lut: str = SettingsField( + title="Thumbnails" + ) + monitor_out_lut: str = SettingsField( + title="Monitor Out" + ) + int_8_lut: str = SettingsField( + title="8-bit Files" + ) + int_16_lut: str = SettingsField( + title="16-bit Files" + ) + log_lut: str = SettingsField( + title="Log Files" + ) + float_lut: str = SettingsField( + title="Float Files" ) @@ -120,6 +135,9 @@ class ViewProcessModel(BaseSettingsModel): viewerProcess: str = SettingsField( title="Viewer Process Name" ) + output_transform: str = SettingsField( + title="Output Transform" + ) class ImageIOConfigModel(BaseSettingsModel): @@ -214,16 +232,23 @@ class ImageIOSettings(BaseSettingsModel): 
DEFAULT_IMAGEIO_SETTINGS = { "viewer": { - "viewerProcess": "sRGB (default)" + "viewerProcess": "ACES/sRGB", + "output_transform": "ACES/sRGB" }, "baking": { - "viewerProcess": "rec709 (default)" + "viewerProcess": "ACES/Rec.709", + "output_transform": "ACES/Rec.709" }, "workfile": { "color_management": "OCIO", - "native_ocio_config": "nuke-default", - "working_space": "scene_linear", - "thumbnail_space": "sRGB (default)", + "native_ocio_config": "aces_1.2", + "working_space": "role_scene_linear", + "monitor_lut": "ACES/sRGB", + "monitor_out_lut": "ACES/sRGB", + "int_8_lut": "role_matte_paint", + "int_16_lut": "role_texture_paint", + "log_lut": "role_compositing_log", + "float_lut": "role_scene_linear" }, "nodes": { "required_nodes": [ diff --git a/server_addon/nuke/server/settings/publish_plugins.py b/server_addon/nuke/server/settings/publish_plugins.py index d5b05d8715..6c37ecd37a 100644 --- a/server_addon/nuke/server/settings/publish_plugins.py +++ b/server_addon/nuke/server/settings/publish_plugins.py @@ -125,6 +125,7 @@ class ReformatNodesConfigModel(BaseSettingsModel): class IntermediateOutputModel(BaseSettingsModel): name: str = SettingsField(title="Output name") + publish: bool = SettingsField(title="Publish") filter: BakingStreamFilterModel = SettingsField( title="Filter", default_factory=BakingStreamFilterModel) read_raw: bool = SettingsField( @@ -230,10 +231,6 @@ class PublishPluginsModel(BaseSettingsModel): default_factory=OptionalPluginModel, section="Validators" ) - ValidateContainers: OptionalPluginModel = SettingsField( - title="Validate Containers", - default_factory=OptionalPluginModel - ) ValidateKnobs: ValidateKnobsModel = SettingsField( title="Validate Knobs", default_factory=ValidateKnobsModel @@ -299,11 +296,6 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "optional": True, "active": True }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateKnobs": { "enabled": False, "knobs": "\n".join([ @@ -346,6 +338,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "outputs": [ { "name": "baking", + "publish": False, "filter": { "task_types": [], "product_types": [], @@ -401,6 +394,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "outputs": [ { "name": "baking", + "publish": False, "filter": { "task_types": [], "product_types": [], diff --git a/server_addon/nuke/server/version.py b/server_addon/nuke/server/version.py deleted file mode 100644 index 569b1212f7..0000000000 --- a/server_addon/nuke/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.10" diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py new file mode 100644 index 0000000000..22043f951c --- /dev/null +++ b/server_addon/photoshop/package.py @@ -0,0 +1,3 @@ +name = "photoshop" +title = "Photoshop" +version = "0.1.3" diff --git a/server_addon/photoshop/server/__init__.py b/server_addon/photoshop/server/__init__.py index 3a45f7a809..86d1025a2d 100644 --- a/server_addon/photoshop/server/__init__.py +++ b/server_addon/photoshop/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING -from .version import __version__ class Photoshop(BaseServerAddon): - name = "photoshop" - title = "Photoshop" - version = __version__ - settings_model = PhotoshopSettings async def get_default_settings(self): diff --git a/server_addon/photoshop/server/settings/publish_plugins.py b/server_addon/photoshop/server/settings/publish_plugins.py index d04faaf53a..149b08beb4 100644 --- 
a/server_addon/photoshop/server/settings/publish_plugins.py +++ b/server_addon/photoshop/server/settings/publish_plugins.py @@ -83,14 +83,6 @@ class CollectVersionPlugin(BaseSettingsModel): enabled: bool = SettingsField(True, title="Enabled") -class ValidateContainersPlugin(BaseSettingsModel): - """Check that workfile contains latest version of loaded items""" # noqa - _isGroup = True - enabled: bool = True - optional: bool = SettingsField(False, title="Optional") - active: bool = SettingsField(True, title="Active") - - class ValidateNamingPlugin(BaseSettingsModel): """Validate naming of products and layers""" # noqa invalid_chars: str = SettingsField( @@ -154,11 +146,6 @@ class PhotoshopPublishPlugins(BaseSettingsModel): default_factory=CollectVersionPlugin, ) - ValidateContainers: ValidateContainersPlugin = SettingsField( - title="Validate Containers", - default_factory=ValidateContainersPlugin, - ) - ValidateNaming: ValidateNamingPlugin = SettingsField( title="Validate naming of products and layers", default_factory=ValidateNamingPlugin, @@ -187,11 +174,6 @@ DEFAULT_PUBLISH_SETTINGS = { "CollectVersion": { "enabled": False }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateNaming": { "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", "replace_char": "_" diff --git a/server_addon/photoshop/server/version.py b/server_addon/photoshop/server/version.py deleted file mode 100644 index df0c92f1e2..0000000000 --- a/server_addon/photoshop/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.2" diff --git a/server_addon/resolve/package.py b/server_addon/resolve/package.py new file mode 100644 index 0000000000..cf92413bce --- /dev/null +++ b/server_addon/resolve/package.py @@ -0,0 +1,3 @@ +name = "resolve" +title = "DaVinci Resolve" +version = "0.1.0" diff --git a/server_addon/resolve/server/__init__.py b/server_addon/resolve/server/__init__.py index a84180d0f5..35d2db19e4 100644 --- a/server_addon/resolve/server/__init__.py +++ b/server_addon/resolve/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import ResolveSettings, DEFAULT_VALUES class ResolveAddon(BaseServerAddon): - name = "resolve" - title = "DaVinci Resolve" - version = __version__ settings_model: Type[ResolveSettings] = ResolveSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/resolve/server/version.py b/server_addon/resolve/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/resolve/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/royal_render/server/version.py b/server_addon/royal_render/server/version.py deleted file mode 100644 index 485f44ac21..0000000000 --- a/server_addon/royal_render/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.1" diff --git a/server_addon/royalrender/package.py b/server_addon/royalrender/package.py new file mode 100644 index 0000000000..1fdea4abbb --- /dev/null +++ b/server_addon/royalrender/package.py @@ -0,0 +1,3 @@ +name = "royalrender" +title = "Royal Render" +version = "0.1.1" diff --git a/server_addon/royal_render/server/__init__.py b/server_addon/royalrender/server/__init__.py similarity index 77% rename from server_addon/royal_render/server/__init__.py 
diff --git a/server_addon/royal_render/server/__init__.py b/server_addon/royalrender/server/__init__.py
similarity index 77%
rename from server_addon/royal_render/server/__init__.py
rename to server_addon/royalrender/server/__init__.py
index c5f0aafa00..5b10678136 100644
--- a/server_addon/royal_render/server/__init__.py
+++ b/server_addon/royalrender/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
 
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import RoyalRenderSettings, DEFAULT_VALUES
 
 
 class RoyalRenderAddon(BaseServerAddon):
-    name = "royalrender"
-    version = __version__
-    title = "Royal Render"
     settings_model: Type[RoyalRenderSettings] = RoyalRenderSettings
 
     async def get_default_settings(self):
diff --git a/server_addon/royal_render/server/settings.py b/server_addon/royalrender/server/settings.py
similarity index 100%
rename from server_addon/royal_render/server/settings.py
rename to server_addon/royalrender/server/settings.py
diff --git a/server_addon/substancepainter/package.py b/server_addon/substancepainter/package.py
new file mode 100644
index 0000000000..d445b0059f
--- /dev/null
+++ b/server_addon/substancepainter/package.py
@@ -0,0 +1,3 @@
+name = "substancepainter"
+title = "Substance Painter"
+version = "0.1.1"
diff --git a/server_addon/substancepainter/server/__init__.py b/server_addon/substancepainter/server/__init__.py
index 2bf808d508..f6cd51e610 100644
--- a/server_addon/substancepainter/server/__init__.py
+++ b/server_addon/substancepainter/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
 
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import SubstancePainterSettings, DEFAULT_SPAINTER_SETTINGS
 
 
 class SubstancePainterAddon(BaseServerAddon):
-    name = "substancepainter"
-    title = "Substance Painter"
-    version = __version__
     settings_model: Type[SubstancePainterSettings] = SubstancePainterSettings
 
     async def get_default_settings(self):
diff --git a/server_addon/substancepainter/server/settings/load_plugins.py b/server_addon/substancepainter/server/settings/load_plugins.py
new file mode 100644
index 0000000000..e6b2fd86c3
--- /dev/null
+++ b/server_addon/substancepainter/server/settings/load_plugins.py
@@ -0,0 +1,122 @@
+from ayon_server.settings import BaseSettingsModel, SettingsField
+
+
+def normal_map_format_enum():
+    return [
+        {"label": "DirectX", "value": "NormalMapFormat.DirectX"},
+        {"label": "OpenGL", "value": "NormalMapFormat.OpenGL"},
+    ]
+
+
+def tangent_space_enum():
+    return [
+        {"label": "Per Fragment", "value": "TangentSpace.PerFragment"},
+        {"label": "Per Vertex", "value": "TangentSpace.PerVertex"},
+    ]
+
+
+def uv_workflow_enum():
+    return [
+        {"label": "Default", "value": "ProjectWorkflow.Default"},
+        {"label": "UV Tile", "value": "ProjectWorkflow.UVTile"},
+        {"label": "Texture Set Per UV Tile",
+         "value": "ProjectWorkflow.TextureSetPerUVTile"}
+    ]
+
+
+def document_resolution_enum():
+    return [
+        {"label": "128", "value": 128},
+        {"label": "256", "value": 256},
+        {"label": "512", "value": 512},
+        {"label": "1024", "value": 1024},
+        {"label": "2048", "value": 2048},
+        {"label": "4096", "value": 4096}
+    ]
+
+
+class ProjectTemplatesModel(BaseSettingsModel):
+    _layout = "expanded"
+    name: str = SettingsField("default", title="Template Name")
+    default_texture_resolution: int = SettingsField(
+        1024, enum_resolver=document_resolution_enum,
+        title="Document Resolution",
+        description=("Set the texture resolution used when "
+                     "creating a new project.")
+    )
+    import_cameras: bool = SettingsField(
+        True, title="Import Cameras",
+        description="Import cameras from the mesh file.")
+    normal_map_format: str = SettingsField(
+        "NormalMapFormat.DirectX", enum_resolver=normal_map_format_enum,
+        title="Normal Map Format",
+        description=("Set the normal map format used when "
+                     "creating a new project.")
+    )
+    project_workflow: str = SettingsField(
+        "ProjectWorkflow.Default", enum_resolver=uv_workflow_enum,
+        title="UV Workflow",
+        description=("Set the UV workflow used when "
+                     "creating a new project.")
+    )
+    tangent_space_mode: str = SettingsField(
+        "TangentSpace.PerFragment", enum_resolver=tangent_space_enum,
+        title="Tangent Space",
+        description=("How tangent space is computed when "
+                     "creating a new project.")
+    )
+    preserve_strokes: bool = SettingsField(
+        True, title="Preserve Strokes",
+        description=("Preserve stroke positions on the mesh.\n"
+                     "(only relevant when loading into an "
+                     "existing project)")
+    )
+
+
+class ProjectTemplateSettingModel(BaseSettingsModel):
+    project_templates: list[ProjectTemplatesModel] = SettingsField(
+        default_factory=list,
+        title="Project Templates"
+    )
+
+
+class LoadersModel(BaseSettingsModel):
+    SubstanceLoadProjectMesh: ProjectTemplateSettingModel = SettingsField(
+        default_factory=ProjectTemplateSettingModel,
+        title="Load Mesh"
+    )
+
+
+DEFAULT_LOADER_SETTINGS = {
+    "SubstanceLoadProjectMesh": {
+        "project_templates": [
+            {
+                "name": "2K(Default)",
+                "default_texture_resolution": 2048,
+                "import_cameras": True,
+                "normal_map_format": "NormalMapFormat.DirectX",
+                "project_workflow": "ProjectWorkflow.Default",
+                "tangent_space_mode": "TangentSpace.PerFragment",
+                "preserve_strokes": True
+            },
+            {
+                "name": "2K(UV tile)",
+                "default_texture_resolution": 2048,
+                "import_cameras": True,
+                "normal_map_format": "NormalMapFormat.DirectX",
+                "project_workflow": "ProjectWorkflow.UVTile",
+                "tangent_space_mode": "TangentSpace.PerFragment",
+                "preserve_strokes": True
+            },
+            {
+                "name": "4K(Custom)",
+                "default_texture_resolution": 4096,
+                "import_cameras": True,
+                "normal_map_format": "NormalMapFormat.OpenGL",
+                "project_workflow": "ProjectWorkflow.UVTile",
+                "tangent_space_mode": "TangentSpace.PerFragment",
+                "preserve_strokes": True
+            }
+        ]
+    }
+}
diff --git a/server_addon/substancepainter/server/settings/main.py b/server_addon/substancepainter/server/settings/main.py
index f80fa9fe1e..93523fd650 100644
--- a/server_addon/substancepainter/server/settings/main.py
+++ b/server_addon/substancepainter/server/settings/main.py
@@ -1,5 +1,6 @@
 from ayon_server.settings import BaseSettingsModel, SettingsField
 from .imageio import ImageIOSettings, DEFAULT_IMAGEIO_SETTINGS
+from .load_plugins import LoadersModel, DEFAULT_LOADER_SETTINGS
 
 
 class ShelvesSettingsModel(BaseSettingsModel):
@@ -17,9 +18,12 @@ class SubstancePainterSettings(BaseSettingsModel):
         default_factory=list,
         title="Shelves"
     )
+    load: LoadersModel = SettingsField(
+        default_factory=LoadersModel, title="Loaders")
 
 
 DEFAULT_SPAINTER_SETTINGS = {
     "imageio": DEFAULT_IMAGEIO_SETTINGS,
-    "shelves": []
+    "shelves": [],
+    "load": DEFAULT_LOADER_SETTINGS,
 }
diff --git a/server_addon/substancepainter/server/version.py b/server_addon/substancepainter/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/substancepainter/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
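The template entries above store Substance enum values as plain strings (e.g. `"NormalMapFormat.DirectX"`). A minimal sketch, assuming the loader resolves those strings onto the Substance 3D Painter Python API by attribute name; the addon's actual resolution code is not part of this diff:

```python
# Sketch only: maps one settings template onto Substance 3D Painter project
# settings. Runs inside Substance 3D Painter, where this module is available.
import substance_painter.project as sp_project


def settings_from_template(template: dict) -> sp_project.Settings:
    def resolve(enum_cls, value: str):
        # "NormalMapFormat.DirectX" -> the NormalMapFormat.DirectX member
        return getattr(enum_cls, value.split(".")[-1])

    return sp_project.Settings(
        import_cameras=template["import_cameras"],
        default_texture_resolution=template["default_texture_resolution"],
        normal_map_format=resolve(
            sp_project.NormalMapFormat, template["normal_map_format"]),
        project_workflow=resolve(
            sp_project.ProjectWorkflow, template["project_workflow"]),
        tangent_space_mode=resolve(
            sp_project.TangentSpace, template["tangent_space_mode"]),
    )
```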
diff --git a/server_addon/timers_manager/package.py b/server_addon/timers_manager/package.py
new file mode 100644
index 0000000000..bd6b81b4b7
--- /dev/null
+++ b/server_addon/timers_manager/package.py
@@ -0,0 +1,3 @@
+name = "timers_manager"
+title = "Timers Manager"
+version = "0.1.1"
diff --git a/server_addon/timers_manager/server/__init__.py b/server_addon/timers_manager/server/__init__.py
index 29f9d47370..32e83d295c 100644
--- a/server_addon/timers_manager/server/__init__.py
+++ b/server_addon/timers_manager/server/__init__.py
@@ -2,12 +2,8 @@ from typing import Type
 
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import TimersManagerSettings
 
 
 class TimersManagerAddon(BaseServerAddon):
-    name = "timers_manager"
-    version = __version__
-    title = "Timers Manager"
     settings_model: Type[TimersManagerSettings] = TimersManagerSettings
diff --git a/server_addon/timers_manager/server/version.py b/server_addon/timers_manager/server/version.py
deleted file mode 100644
index 485f44ac21..0000000000
--- a/server_addon/timers_manager/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
diff --git a/server_addon/traypublisher/package.py b/server_addon/traypublisher/package.py
new file mode 100644
index 0000000000..c138a2296d
--- /dev/null
+++ b/server_addon/traypublisher/package.py
@@ -0,0 +1,3 @@
+name = "traypublisher"
+title = "TrayPublisher"
+version = "0.1.5"
diff --git a/server_addon/traypublisher/server/__init__.py b/server_addon/traypublisher/server/__init__.py
index e6f079609f..830f325ac0 100644
--- a/server_addon/traypublisher/server/__init__.py
+++ b/server_addon/traypublisher/server/__init__.py
@@ -1,14 +1,9 @@
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import TraypublisherSettings, DEFAULT_TRAYPUBLISHER_SETTING
 
 
 class Traypublisher(BaseServerAddon):
-    name = "traypublisher"
-    title = "TrayPublisher"
-    version = __version__
-
     settings_model = TraypublisherSettings
 
     async def get_default_settings(self):
diff --git a/server_addon/traypublisher/server/settings/creator_plugins.py b/server_addon/traypublisher/server/settings/creator_plugins.py
index bf66d9a088..1ff14002aa 100644
--- a/server_addon/traypublisher/server/settings/creator_plugins.py
+++ b/server_addon/traypublisher/server/settings/creator_plugins.py
@@ -1,4 +1,7 @@
+from pydantic import validator
 from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings.validators import ensure_unique_names
+from ayon_server.exceptions import BadRequestException
 
 
 class BatchMovieCreatorPlugin(BaseSettingsModel):
@@ -22,11 +25,139 @@ class BatchMovieCreatorPlugin(BaseSettingsModel):
     )
 
 
+class ColumnItemModel(BaseSettingsModel):
+    """Definition of a single column in the ingest CSV file."""
+
+    name: str = SettingsField(
+        title="Name",
+        default=""
+    )
+
+    type: str = SettingsField(
+        title="Type",
+        default=""
+    )
+
+    default: str = SettingsField(
+        title="Default",
+        default=""
+    )
+
+    required_column: bool = SettingsField(
+        title="Required Column",
+        default=False
+    )
+
+    validation_pattern: str = SettingsField(
+        title="Validation Regex Pattern",
+        default="^(.*)$"
+    )
+
+
+class ColumnConfigModel(BaseSettingsModel):
+    """Configuration of the CSV delimiter and the expected columns."""
+
+    csv_delimiter: str = SettingsField(
+        title="CSV delimiter",
+        default=","
+    )
+
+    columns: list[ColumnItemModel] = SettingsField(
+        title="Columns",
+        default_factory=list
+    )
+
+    @validator("columns")
+    def validate_unique_outputs(cls, value):
+        ensure_unique_names(value)
+        return value
Name of matching + asset is parsed from file names ('asset.mov', 'asset_v001.mov', + 'my_asset_to_publish.mov')""" + + csv_delimiter: str = SettingsField( + title="CSV delimiter", + default="," + ) + + columns: list[ColumnItemModel] = SettingsField( + title="Columns", + default_factory=list + ) + + @validator("columns") + def validate_unique_outputs(cls, value): + ensure_unique_names(value) + return value + + +class RepresentationItemModel(BaseSettingsModel): + """Allows to publish multiple video files in one go. + + Name of matching asset is parsed from file names + ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov') + """ + + name: str = SettingsField( + title="Name", + default="" + ) + + extensions: list[str] = SettingsField( + title="Extensions", + default_factory=list + ) + + @validator("extensions") + def validate_extension(cls, value): + for ext in value: + if not ext.startswith("."): + raise BadRequestException(f"Extension must start with '.': {ext}") + return value + + +class RepresentationConfigModel(BaseSettingsModel): + """Allows to publish multiple video files in one go.
Name of matching + asset is parsed from file names ('asset.mov', 'asset_v001.mov', + 'my_asset_to_publish.mov')""" + + tags_delimiter: str = SettingsField( + title="Tags delimiter", + default=";" + ) + + default_tags: list[str] = SettingsField( + title="Default tags", + default_factory=list + ) + + representations: list[RepresentationItemModel] = SettingsField( + title="Representations", + default_factory=list + ) + + @validator("representations") + def validate_unique_outputs(cls, value): + ensure_unique_names(value) + return value + + +class IngestCSVPluginModel(BaseSettingsModel): + """Allows to publish multiple video files in one go.
Name of matching + asset is parsed from file names ('asset.mov', 'asset_v001.mov', + 'my_asset_to_publish.mov')""" + + enabled: bool = SettingsField( + title="Enabled", + default=False + ) + + columns_config: ColumnConfigModel = SettingsField( + title="Columns config", + default_factory=ColumnConfigModel + ) + + representations_config: RepresentationConfigModel = SettingsField( + title="Representations config", + default_factory=RepresentationConfigModel + ) + + class TrayPublisherCreatePluginsModel(BaseSettingsModel): BatchMovieCreator: BatchMovieCreatorPlugin = SettingsField( title="Batch Movie Creator", default_factory=BatchMovieCreatorPlugin ) + IngestCSV: IngestCSVPluginModel = SettingsField( + title="Ingest CSV", + default_factory=IngestCSVPluginModel + ) DEFAULT_CREATORS = { @@ -41,4 +172,170 @@ DEFAULT_CREATORS = { ".mov" ] }, + "IngestCSV": { + "enabled": True, + "columns_config": { + "csv_delimiter": ",", + "columns": [ + { + "name": "File Path", + "type": "text", + "default": "", + "required_column": True, + "validation_pattern": "^([a-z0-9#._\\/]*)$" + }, + { + "name": "Folder Path", + "type": "text", + "default": "", + "required_column": True, + "validation_pattern": "^([a-zA-Z0-9_\\/]*)$" + }, + { + "name": "Task Name", + "type": "text", + "default": "", + "required_column": True, + "validation_pattern": "^(.*)$" + }, + { + "name": "Product Type", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + }, + { + "name": "Variant", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + }, + { + "name": "Version", + "type": "number", + "default": 1, + "required_column": True, + "validation_pattern": "^(\\d{1,3})$" + }, + { + "name": "Version Comment", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + }, + { + "name": "Version Thumbnail", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^([a-zA-Z0-9#._\\/]*)$" + }, + { + "name": "Frame Start", + "type": "number", + "default": 0, + "required_column": True, + "validation_pattern": "^(\\d{1,8})$" + }, + { + "name": "Frame End", + "type": "number", + "default": 0, + "required_column": True, + "validation_pattern": "^(\\d{1,8})$" + }, + { + "name": "Handle Start", + "type": "number", + "default": 0, + "required_column": True, + "validation_pattern": "^(\\d)$" + }, + { + "name": "Handle End", + "type": "number", + "default": 0, + "required_column": True, + "validation_pattern": "^(\\d)$" + }, + { + "name": "FPS", + "type": "decimal", + "default": 0.0, + "required_column": True, + "validation_pattern": "^[0-9]*\\.[0-9]+$|^[0-9]+$" + }, + { + "name": "Slate Exists", + "type": "bool", + "default": True, + "required_column": False, + "validation_pattern": "(True|False)" + }, + { + "name": "Representation", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + }, + { + "name": "Representation Colorspace", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + }, + { + "name": "Representation Tags", + "type": "text", + "default": "", + "required_column": False, + "validation_pattern": "^(.*)$" + } + ] + }, + "representations_config": { + "tags_delimiter": ";", + "default_tags": [ + "review" + ], + "representations": [ + { + "name": "preview", + "extensions": [ + ".mp4", + ".mov" + ] + }, + { + "name": "exr", + "extensions": [ + ".exr" + ] + }, + { + "name": "edit", + "extensions": [ + 
".mov" + ] + }, + { + "name": "review", + "extensions": [ + ".mov" + ] + }, + { + "name": "nuke", + "extensions": [ + ".nk" + ] + } + ] + } + } } diff --git a/server_addon/traypublisher/server/settings/publish_plugins.py b/server_addon/traypublisher/server/settings/publish_plugins.py index f413c86227..99a0bbf107 100644 --- a/server_addon/traypublisher/server/settings/publish_plugins.py +++ b/server_addon/traypublisher/server/settings/publish_plugins.py @@ -1,4 +1,7 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, +) class ValidatePluginModel(BaseSettingsModel): @@ -14,6 +17,45 @@ class ValidateFrameRangeModel(ValidatePluginModel): 'my_asset_to_publish.mov')""" +class ExtractEditorialPckgFFmpegModel(BaseSettingsModel): + video_filters: list[str] = SettingsField( + default_factory=list, + title="Video filters" + ) + audio_filters: list[str] = SettingsField( + default_factory=list, + title="Audio filters" + ) + input: list[str] = SettingsField( + default_factory=list, + title="Input arguments" + ) + output: list[str] = SettingsField( + default_factory=list, + title="Output arguments" + ) + + +class ExtractEditorialPckgOutputDefModel(BaseSettingsModel): + _layout = "expanded" + ext: str = SettingsField("", title="Output extension") + + ffmpeg_args: ExtractEditorialPckgFFmpegModel = SettingsField( + default_factory=ExtractEditorialPckgFFmpegModel, + title="FFmpeg arguments" + ) + + +class ExtractEditorialPckgConversionModel(BaseSettingsModel): + """Set output definition if resource files should be converted.""" + conversion_enabled: bool = SettingsField(True, + title="Conversion enabled") + output: ExtractEditorialPckgOutputDefModel = SettingsField( + default_factory=ExtractEditorialPckgOutputDefModel, + title="Output Definitions", + ) + + class TrayPublisherPublishPlugins(BaseSettingsModel): CollectFrameDataFromAssetEntity: ValidatePluginModel = SettingsField( default_factory=ValidatePluginModel, @@ -28,6 +70,13 @@ class TrayPublisherPublishPlugins(BaseSettingsModel): default_factory=ValidatePluginModel, ) + ExtractEditorialPckgConversion: ExtractEditorialPckgConversionModel = ( + SettingsField( + default_factory=ExtractEditorialPckgConversionModel, + title="Extract Editorial Package Conversion" + ) + ) + DEFAULT_PUBLISH_PLUGINS = { "CollectFrameDataFromAssetEntity": { @@ -44,5 +93,24 @@ DEFAULT_PUBLISH_PLUGINS = { "enabled": True, "optional": True, "active": True + }, + "ExtractEditorialPckgConversion": { + "optional": False, + "conversion_enabled": True, + "output": { + "ext": "", + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [ + "-apply_trc gamma22" + ], + "output": [ + "-pix_fmt yuv420p", + "-crf 18", + "-intra" + ] + } + } } } diff --git a/server_addon/traypublisher/server/version.py b/server_addon/traypublisher/server/version.py deleted file mode 100644 index de699158fd..0000000000 --- a/server_addon/traypublisher/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.4" diff --git a/server_addon/tvpaint/package.py b/server_addon/tvpaint/package.py new file mode 100644 index 0000000000..2be3164f4a --- /dev/null +++ b/server_addon/tvpaint/package.py @@ -0,0 +1,3 @@ +name = "tvpaint" +title = "TVPaint" +version = "0.1.2" diff --git a/server_addon/tvpaint/server/__init__.py b/server_addon/tvpaint/server/__init__.py index 033d7d3792..658dcf0bb6 100644 --- 
diff --git a/server_addon/tvpaint/package.py b/server_addon/tvpaint/package.py
new file mode 100644
index 0000000000..2be3164f4a
--- /dev/null
+++ b/server_addon/tvpaint/package.py
@@ -0,0 +1,3 @@
+name = "tvpaint"
+title = "TVPaint"
+version = "0.1.2"
diff --git a/server_addon/tvpaint/server/__init__.py b/server_addon/tvpaint/server/__init__.py
index 033d7d3792..658dcf0bb6 100644
--- a/server_addon/tvpaint/server/__init__.py
+++ b/server_addon/tvpaint/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type
 
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import TvpaintSettings, DEFAULT_VALUES
 
 
 class TvpaintAddon(BaseServerAddon):
-    name = "tvpaint"
-    title = "TVPaint"
-    version = __version__
     settings_model: Type[TvpaintSettings] = TvpaintSettings
 
     async def get_default_settings(self):
diff --git a/server_addon/tvpaint/server/version.py b/server_addon/tvpaint/server/version.py
deleted file mode 100644
index b3f4756216..0000000000
--- a/server_addon/tvpaint/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.2"
diff --git a/server_addon/unreal/package.py b/server_addon/unreal/package.py
new file mode 100644
index 0000000000..cab89ca873
--- /dev/null
+++ b/server_addon/unreal/package.py
@@ -0,0 +1,3 @@
+name = "unreal"
+title = "Unreal"
+version = "0.1.0"
diff --git a/server_addon/unreal/server/__init__.py b/server_addon/unreal/server/__init__.py
index a5f3e9597d..751560b623 100644
--- a/server_addon/unreal/server/__init__.py
+++ b/server_addon/unreal/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type
 
 from ayon_server.addons import BaseServerAddon
 
-from .version import __version__
 from .settings import UnrealSettings, DEFAULT_VALUES
 
 
 class UnrealAddon(BaseServerAddon):
-    name = "unreal"
-    title = "Unreal"
-    version = __version__
     settings_model: Type[UnrealSettings] = UnrealSettings
-    frontend_scopes = {}
-    services = {}
 
     async def get_default_settings(self):
         settings_model_cls = self.get_settings_model()
diff --git a/server_addon/unreal/server/version.py b/server_addon/unreal/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/unreal/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
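The pattern repeated across these addons is the same: `name`, `title` and `version` move out of the server `__init__.py` classes (and the deleted `server/version.py` files) into a top-level `package.py`. A hedged sketch of how server-side code could read that metadata back; the actual AYON loader mechanism is not shown in this diff:

```python
# Sketch only: load addon identity from a package.py module by path.
import importlib.util


def read_addon_package(path):
    spec = importlib.util.spec_from_file_location("addon_package", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return {
        "name": module.name,
        "title": getattr(module, "title", module.name),
        "version": module.version,
    }


print(read_addon_package("server_addon/unreal/package.py"))
```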