diff --git a/client/ayon_core/hooks/pre_ocio_hook.py b/client/ayon_core/hooks/pre_ocio_hook.py index 0817afec71..6c30b267bc 100644 --- a/client/ayon_core/hooks/pre_ocio_hook.py +++ b/client/ayon_core/hooks/pre_ocio_hook.py @@ -1,7 +1,7 @@ from ayon_applications import PreLaunchHook -from ayon_core.pipeline.colorspace import get_imageio_config -from ayon_core.pipeline.template_data import get_template_data_with_names +from ayon_core.pipeline.colorspace import get_imageio_config_preset +from ayon_core.pipeline.template_data import get_template_data class OCIOEnvHook(PreLaunchHook): @@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook): def execute(self): """Hook entry method.""" - template_data = get_template_data_with_names( - project_name=self.data["project_name"], - folder_path=self.data["folder_path"], - task_name=self.data["task_name"], + folder_entity = self.data["folder_entity"] + + template_data = get_template_data( + self.data["project_entity"], + folder_entity=folder_entity, + task_entity=self.data["task_entity"], host_name=self.host_name, - settings=self.data["project_settings"] + settings=self.data["project_settings"], ) - config_data = get_imageio_config( - project_name=self.data["project_name"], - host_name=self.host_name, - project_settings=self.data["project_settings"], - anatomy_data=template_data, + config_data = get_imageio_config_preset( + self.data["project_name"], + self.data["folder_path"], + self.data["task_name"], + self.host_name, anatomy=self.data["anatomy"], + project_settings=self.data["project_settings"], + template_data=template_data, env=self.launch_context.env, + folder_id=folder_entity["id"], ) - if config_data: - ocio_path = config_data["path"] - - if self.host_name in ["nuke", "hiero"]: - ocio_path = ocio_path.replace("\\", "/") - - self.log.info( - f"Setting OCIO environment to config path: {ocio_path}") - - self.launch_context.env["OCIO"] = ocio_path - else: + if not config_data: self.log.debug("OCIO not set or enabled") + return + + ocio_path = config_data["path"] + + if self.host_name in ["nuke", "hiero"]: + ocio_path = ocio_path.replace("\\", "/") + + self.log.info( + f"Setting OCIO environment to config path: {ocio_path}") + + self.launch_context.env["OCIO"] = ocio_path diff --git a/client/ayon_core/hosts/aftereffects/api/launch_logic.py b/client/ayon_core/hosts/aftereffects/api/launch_logic.py index 5a23f2cb35..da6887668a 100644 --- a/client/ayon_core/hosts/aftereffects/api/launch_logic.py +++ b/client/ayon_core/hosts/aftereffects/api/launch_logic.py @@ -60,7 +60,7 @@ def main(*subprocess_args): ) ) - elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): + elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True): save = False if os.getenv("WORKFILES_SAVE_AS"): save = True diff --git a/client/ayon_core/hosts/aftereffects/api/pipeline.py b/client/ayon_core/hosts/aftereffects/api/pipeline.py index 105fee64b9..2239040f09 100644 --- a/client/ayon_core/hosts/aftereffects/api/pipeline.py +++ b/client/ayon_core/hosts/aftereffects/api/pipeline.py @@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, + register_workfile_build_plugin_path, AVALON_CONTAINER_ID, AVALON_INSTANCE_ID, AYON_INSTANCE_ID, ) -from ayon_core.hosts.aftereffects.api.workfile_template_builder import ( - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin -) from ayon_core.pipeline.load import any_outdated_containers import ayon_core.hosts.aftereffects 
@@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): @@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) register_event_callback("application.launched", application_launch) @@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): item["id"] = "publish_context" self.stub.imprint(item["id"], item) - def get_workfile_build_placeholder_plugins(self): - return [ - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin - ] - # created instances section def list_instances(self): """List all created instances from current workfile which diff --git a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py index aa2f36e8aa..99d5bbb938 100644 --- a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py @@ -1,6 +1,7 @@ import os.path import uuid import shutil +from abc import abstractmethod from ayon_core.pipeline import registered_host from ayon_core.tools.workfile_template_build import ( @@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import ( from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin + PlaceholderItem ) from ayon_core.hosts.aftereffects.api import get_stub -from ayon_core.hosts.aftereffects.api.lib import set_settings PLACEHOLDER_SET = "PLACEHOLDERS_SET" PLACEHOLDER_ID = "openpype.placeholder" @@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder): class AEPlaceholderPlugin(PlaceholderPlugin): """Contains generic methods for all PlaceholderPlugins.""" + @abstractmethod + def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem: + pass + def collect_placeholders(self): """Collect info from file metadata about created placeholders. @@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin): if item.get("plugin_identifier") != self.identifier: continue - if isinstance(self, AEPlaceholderLoadPlugin): - item = LoadPlaceholderItem(item["uuid"], - item["data"], - self) - elif isinstance(self, AEPlaceholderCreatePlugin): - item = CreatePlaceholderItem(item["uuid"], - item["data"], - self) - else: - raise NotImplementedError(f"Not implemented for {type(self)}") - + item = self._create_placeholder_item(item) output.append(item) return output @@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin): stub.imprint(item_id, container_data) -class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin): - """Adds Create placeholder. 
- - This adds composition and runs Create - """ - identifier = "aftereffects.create" - label = "AfterEffects create" - - def create_placeholder(self, placeholder_data): - stub = get_stub() - name = "CREATEPLACEHOLDER" - item_id = stub.add_item(name, "COMP") - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Replace 'placeholder' with publishable instance. - - Renames prepared composition name, creates publishable instance, sets - frame/duration settings according to DB. - """ - pre_create_data = {"use_selection": True} - item_id, item = self._get_item(placeholder) - get_stub().select_items([item_id]) - self.populate_create_placeholder(placeholder, pre_create_data) - - # apply settings for populated composition - item_id, metadata_item = self._get_item(placeholder) - set_settings(True, True, [item_id]) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - -class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin): - identifier = "aftereffects.load" - label = "AfterEffects load" - - def create_placeholder(self, placeholder_data): - """Creates AE's Placeholder item in Project items list. - - Sets dummy resolution/duration/fps settings, will be replaced when - populated. - """ - stub = get_stub() - name = "LOADERPLACEHOLDER" - item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Use Openpype Loader from `placeholder` to create new FootageItems - - New FootageItems are created, files are imported. - """ - self.populate_load_placeholder(placeholder) - errors = placeholder.get_errors() - stub = get_stub() - if errors: - stub.print_msg("\n".join(errors)) - else: - if not placeholder.data["keep_placeholder"]: - metadata = stub.get_metadata() - for item in metadata: - if not item.get("is_placeholder"): - continue - scene_identifier = item.get("uuid") - if (scene_identifier and - scene_identifier == placeholder.scene_identifier): - stub.delete_item(item["members"][0]) - stub.remove_instance(placeholder.scene_identifier, metadata) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def load_succeed(self, placeholder, container): - placeholder_item_id, _ = self._get_item(placeholder) - item_id = container.id - get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) - - def build_workfile_template(*args, **kwargs): builder = AETemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py index c28042b6ae..ebd4b8f944 100644 --- a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py @@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance): class CollectAERender(publish.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.405 + order = pyblish.api.CollectorOrder + 0.100 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] @@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender): if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git 
a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..c7927f176f --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,49 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api.lib import set_settings +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin, + PlaceholderCreateMixin): + """Adds Create placeholder. + + This adds composition and runs Create + """ + identifier = "aftereffects.create" + label = "AfterEffects create" + + def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem: + return CreatePlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + stub = get_stub() + name = "CREATEPLACEHOLDER" + item_id = stub.add_item(name, "COMP") + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Replace 'placeholder' with publishable instance. + + Renames prepared composition name, creates publishable instance, sets + frame/duration settings according to DB. + """ + pre_create_data = {"use_selection": True} + item_id, item = self._get_item(placeholder) + get_stub().select_items([item_id]) + self.populate_create_placeholder(placeholder, pre_create_data) + + # apply settings for populated composition + item_id, metadata_item = self._get_item(placeholder) + set_settings(True, True, [item_id]) + + def get_placeholder_options(self, options=None): + return self.get_create_plugin_options(options) diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..7f7e4f49ce --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,60 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "aftereffects.load" + label = "AfterEffects load" + + def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem: + return LoadPlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + """Creates AE's Placeholder item in Project items list. + + Sets dummy resolution/duration/fps settings, will be replaced when + populated. + """ + stub = get_stub() + name = "LOADERPLACEHOLDER" + item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Use Openpype Loader from `placeholder` to create new FootageItems + + New FootageItems are created, files are imported. 
+ """ + self.populate_load_placeholder(placeholder) + errors = placeholder.get_errors() + stub = get_stub() + if errors: + stub.print_msg("\n".join(errors)) + else: + if not placeholder.data["keep_placeholder"]: + metadata = stub.get_metadata() + for item in metadata: + if not item.get("is_placeholder"): + continue + scene_identifier = item.get("uuid") + if (scene_identifier and + scene_identifier == placeholder.scene_identifier): + stub.delete_item(item["members"][0]) + stub.remove_instance(placeholder.scene_identifier, metadata) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + placeholder_item_id, _ = self._get_item(placeholder) + item_id = container.id + get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) diff --git a/client/ayon_core/hosts/blender/api/lib.py b/client/ayon_core/hosts/blender/api/lib.py index 458a275b51..32137f0fcd 100644 --- a/client/ayon_core/hosts/blender/api/lib.py +++ b/client/ayon_core/hosts/blender/api/lib.py @@ -33,7 +33,7 @@ def load_scripts(paths): if register: try: register() - except: + except: # noqa E722 traceback.print_exc() else: print("\nWarning! '%s' has no register function, " @@ -45,7 +45,7 @@ def load_scripts(paths): if unregister: try: unregister() - except: + except: # noqa E722 traceback.print_exc() def test_reload(mod): @@ -57,7 +57,7 @@ def load_scripts(paths): try: return importlib.reload(mod) - except: + except: # noqa E722 traceback.print_exc() def test_register(mod): diff --git a/client/ayon_core/hosts/blender/api/plugin.py b/client/ayon_core/hosts/blender/api/plugin.py index 6c9bfb6569..4a13d16805 100644 --- a/client/ayon_core/hosts/blender/api/plugin.py +++ b/client/ayon_core/hosts/blender/api/plugin.py @@ -143,13 +143,19 @@ def deselect_all(): if obj.mode != 'OBJECT': modes.append((obj, obj.mode)) bpy.context.view_layer.objects.active = obj - bpy.ops.object.mode_set(mode='OBJECT') + context_override = create_blender_context(active=obj) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode='OBJECT') - bpy.ops.object.select_all(action='DESELECT') + context_override = create_blender_context() + with bpy.context.temp_override(**context_override): + bpy.ops.object.select_all(action='DESELECT') for p in modes: bpy.context.view_layer.objects.active = p[0] - bpy.ops.object.mode_set(mode=p[1]) + context_override = create_blender_context(active=p[0]) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode=p[1]) bpy.context.view_layer.objects.active = active diff --git a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py index 6178578081..a49bb40d9a 100644 --- a/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py +++ b/client/ayon_core/hosts/blender/plugins/load/load_camera_abc.py @@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader): def _process(self, libpath, asset_group, group_name): plugin.deselect_all() - bpy.ops.wm.alembic_import(filepath=libpath) + # Force the creation of the transform cache even if the camera + # doesn't have an animation. We use the cache to update the camera. 
+            bpy.ops.wm.alembic_import(
+                filepath=libpath, always_add_cache_reader=True)
 
         objects = lib.get_selection()
 
@@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
             self.log.info("Library already loaded, not updating...")
             return
 
-        mat = asset_group.matrix_basis.copy()
+        for obj in asset_group.children:
+            found = False
+            for constraint in obj.constraints:
+                if constraint.type == "TRANSFORM_CACHE":
+                    constraint.cache_file.filepath = libpath.as_posix()
+                    found = True
+                    break
+            if not found:
+                # Keep compatibility with cameras loaded with
+                # the old loader:
+                # create a new constraint for the cache file.
+                constraint = obj.constraints.new("TRANSFORM_CACHE")
+                bpy.ops.cachefile.open(filepath=libpath.as_posix())
+                constraint.cache_file = bpy.data.cache_files[-1]
+                constraint.cache_file.scale = 1.0
 
-        self._remove(asset_group)
-        self._process(str(libpath), asset_group, object_name)
+            # Workaround: Blender doesn't load the list of object paths
+            # until the object is evaluated, so add a temporary modifier
+            # and evaluate the depsgraph to force the evaluation.
+            # The modifier doesn't need to be removed because camera
+            # objects don't have modifiers.
+            obj.modifiers.new(
+                name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
+            bpy.context.evaluated_depsgraph_get()
 
-        asset_group.matrix_basis = mat
+            constraint.object_path = (
+                constraint.cache_file.object_paths[0].path)
 
         metadata["libpath"] = str(libpath)
         metadata["representation"] = repre_entity["id"]
diff --git a/client/ayon_core/hosts/fusion/api/action.py b/client/ayon_core/hosts/fusion/api/action.py
index 1643f1ce03..a0c6aafcb5 100644
--- a/client/ayon_core/hosts/fusion/api/action.py
+++ b/client/ayon_core/hosts/fusion/api/action.py
@@ -58,3 +58,52 @@ class SelectInvalidAction(pyblish.api.Action):
         self.log.info(
             "Selecting invalid tools: %s" % ", ".join(sorted(names))
         )
+
+
+class SelectToolAction(pyblish.api.Action):
+    """Select the invalid saver tool in Fusion when a plug-in failed."""
+
+    label = "Select saver"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "search"  # Icon from Font Awesome
+
+    def process(self, context, plugin):
+        errored_instances = get_errored_instances_from_context(
+            context,
+            plugin=plugin,
+        )
+
+        # Get the invalid tools for the plug-ins
+        self.log.info("Finding invalid tools..")
+        tools = []
+        for instance in errored_instances:
+            tool = instance.data.get("tool")
+            if tool is not None:
+                tools.append(tool)
+            else:
+                self.log.warning(
+                    "Plug-in reported as invalid, "
+                    f"but instance {instance.name} has no saver."
+ ) + + if not tools: + # Assume relevant comp is current comp and clear selection + self.log.info("No invalid tools found.") + comp = get_current_comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + return + + # Assume a single comp + first_tool = tools[0] + comp = first_tool.Comp() + flow = comp.CurrentFrame.FlowView + flow.Select() # No args equals clearing selection + names = set() + for tool in tools: + flow.Select(tool, True) + comp.SetActiveTool(tool) + names.add(tool.Name) + self.log.info( + "Selecting invalid tools: %s" % ", ".join(sorted(names)) + ) diff --git a/client/ayon_core/hosts/fusion/api/lib.py b/client/ayon_core/hosts/fusion/api/lib.py index 08722463e1..7f7d20010d 100644 --- a/client/ayon_core/hosts/fusion/api/lib.py +++ b/client/ayon_core/hosts/fusion/api/lib.py @@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False): def _on_repair(): attributes = dict() for key, comp_key, _label in validations: - value = folder_value[key] + value = folder_attributes[key] comp_key_full = "Comp.FrameFormat.{}".format(comp_key) attributes[comp_key_full] = value comp.SetPrefs(attributes) diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py index 7a2844d5db..9c04e59717 100644 --- a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py @@ -52,7 +52,7 @@ class CollectFusionRender( if product_type not in ["render", "image"]: continue - task_name = context.data["task"] + task_name = inst.data["task"] tool = inst.data["transientData"]["tool"] instance_families = inst.data.get("families", []) @@ -115,6 +115,7 @@ class CollectFusionRender( if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git a/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py new file mode 100644 index 0000000000..3aa6fb452f --- /dev/null +++ b/client/ayon_core/hosts/fusion/plugins/publish/validate_instance_in_context.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +"""Validate if instance context is the same as publish context.""" + +import pyblish.api +from ayon_core.hosts.fusion.api.action import SelectToolAction +from ayon_core.pipeline.publish import ( + RepairAction, + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin +) + + +class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): + """Validator to check if instance context matches context of publish. + + When working in per-shot style you always publish data in context of + current asset (shot). This validator checks if this is so. It is optional + so it can be disabled when needed. 
+ """ + # Similar to maya and houdini-equivalent `ValidateInstanceInContext` + + order = ValidateContentsOrder + label = "Instance in same Context" + optional = True + hosts = ["fusion"] + actions = [SelectToolAction, RepairAction] + + def process(self, instance): + if not self.is_active(instance.data): + return + + instance_context = self.get_context(instance.data) + context = self.get_context(instance.context.data) + if instance_context != context: + context_label = "{} > {}".format(*context) + instance_label = "{} > {}".format(*instance_context) + + raise PublishValidationError( + message=( + "Instance '{}' publishes to different asset than current " + "context: {}. Current context: {}".format( + instance.name, instance_label, context_label + ) + ), + description=( + "## Publishing to a different asset\n" + "There are publish instances present which are publishing " + "into a different asset than your current context.\n\n" + "Usually this is not what you want but there can be cases " + "where you might want to publish into another asset or " + "shot. If that's the case you can disable the validation " + "on the instance to ignore it." + ) + ) + + @classmethod + def repair(cls, instance): + + create_context = instance.context.data["create_context"] + instance_id = instance.data.get("instance_id") + created_instance = create_context.get_instance_by_id( + instance_id + ) + if created_instance is None: + raise RuntimeError( + f"No CreatedInstances found with id '{instance_id} " + f"in {create_context.instances_by_id}" + ) + + context_asset, context_task = cls.get_context(instance.context.data) + created_instance["folderPath"] = context_asset + created_instance["task"] = context_task + create_context.save_changes() + + @staticmethod + def get_context(data): + """Return asset, task from publishing context data""" + return data["folderPath"], data["task"] diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py index 156e2ac6ba..c63eb114e5 100644 --- a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py @@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender): outputFormat=info[1], outputStartFrame=info[3], leadingZeros=info[2], - ignoreFrameHandleCheck=True + ignoreFrameHandleCheck=True, + #todo: inst is not available, must be determined, fix when + #reworking to Publisher + # deadline=inst.data.get("deadline") ) render_instance.context = context diff --git a/client/ayon_core/hosts/hiero/api/events.py b/client/ayon_core/hosts/hiero/api/events.py index 304605e24e..663004abd2 100644 --- a/client/ayon_core/hosts/hiero/api/events.py +++ b/client/ayon_core/hosts/hiero/api/events.py @@ -8,6 +8,7 @@ from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, before_project_save, + apply_colorspace_project ) from .tags import add_tags_to_workfile from .menu import update_menu_task_label @@ -44,6 +45,8 @@ def afterNewProjectCreated(event): # reset workfiles startup not to open any more in session os.environ["WORKFILES_STARTUP"] = "0" + apply_colorspace_project() + def beforeProjectLoad(event): log.info("before project load event...") @@ -122,6 +125,7 @@ def register_hiero_events(): except RuntimeError: pass + def register_events(): """ Adding all callbacks. 
diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py
index 8682ff7780..456a68f125 100644
--- a/client/ayon_core/hosts/hiero/api/lib.py
+++ b/client/ayon_core/hosts/hiero/api/lib.py
@@ -11,7 +11,6 @@ import warnings
 import json
 import ast
 import secrets
-import shutil
 
 import hiero
 from qtpy import QtWidgets, QtCore
@@ -36,9 +35,6 @@ from .constants import (
     DEFAULT_SEQUENCE_NAME,
     DEFAULT_BIN_NAME
 )
-from ayon_core.pipeline.colorspace import (
-    get_imageio_config
-)
 
 
 class _CTX:
@@ -105,9 +101,9 @@ def flatten(list_):
 
 
 def get_current_project(remove_untitled=False):
-    projects = flatten(hiero.core.projects())
+    projects = hiero.core.projects()
     if not remove_untitled:
-        return next(iter(projects))
+        return projects[0]
 
     # if remove_untitled
     for proj in projects:
@@ -1050,30 +1046,84 @@ def _set_hrox_project_knobs(doc, **knobs):
 
 
 def apply_colorspace_project():
-    project_name = get_current_project_name()
-    # get path the the active projects
-    project = get_current_project(remove_untitled=True)
-    current_file = project.path()
-
-    # close the active project
-    project.close()
+    """Apply colorspaces from settings.
+    Due to not being able to set the project settings through the Python API,
+    we need to use some dubious code to find the widgets and set them. It is
+    possible to set the project settings without traversing the widgets, but
+    it involves reading the hrox files from disk with XML, so there is no
+    in-memory support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python  # noqa
+    for more details.
+    """
     # get presets for hiero
+    project_name = get_current_project_name()
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
     presets = imageio.get("workfile")
 
+    # Open the Project Settings UI.
+    for act in hiero.ui.registeredActions():
+        if act.objectName() == "foundry.project.settings":
+            act.trigger()
+
+    # Find widgets from their sibling label.
+    labels = {
+        "Working Space:": "workingSpace",
+        "Viewer:": "viewerLut",
+        "Thumbnails:": "thumbnailLut",
+        "Monitor Out:": "monitorOutLut",
+        "8 Bit Files:": "eightBitLut",
+        "16 Bit Files:": "sixteenBitLut",
+        "Log Files:": "logLut",
+        "Floating Point Files:": "floatLut"
+    }
+    widgets = {x: None for x in labels.values()}
+
+    def _recursive_children(widget, labels, widgets):
+        children = widget.children()
+        for count, child in enumerate(children):
+            if isinstance(child, QtWidgets.QLabel):
+                if child.text() in labels.keys():
+                    widgets[labels[child.text()]] = children[count + 1]
+            _recursive_children(child, labels, widgets)
+
+    app = QtWidgets.QApplication.instance()
+    title = "Project Settings"
+    for widget in app.topLevelWidgets():
+        if isinstance(widget, QtWidgets.QMainWindow):
+            if widget.windowTitle() != title:
+                continue
+            _recursive_children(widget, labels, widgets)
+            widget.close()
+
+    msg = "Setting value \"{}\" is not a valid option for \"{}\""
+    for key, widget in widgets.items():
+        options = [widget.itemText(i) for i in range(widget.count())]
+        setting_value = presets[key]
+        assert setting_value in options, msg.format(setting_value, key)
+        widget.setCurrentText(presets[key])
+
+    # This code block is for setting up project colorspaces for files on disk.
+    # Due to not having Python API access to set the project settings, the
+    # Foundry recommended way is to modify the hrox files on disk with XML.
+    # See this forum thread for more details;
+    # https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python  # noqa
+    '''
     # backward compatibility layer
     # TODO: remove this after some time
-    config_data = get_imageio_config(
-        project_name=get_current_project_name(),
-        host_name="hiero"
-    )
+    config_data = get_current_context_imageio_config_preset()
     if config_data:
         presets.update({
             "ocioConfigName": "custom"
         })
 
+    # get the path of the active project
+    project = get_current_project()
+    current_file = project.path()
+
+    msg = "The project needs to be saved to disk to apply colorspace settings."
+    assert current_file, msg
+
     # save the workfile as subversion "comment:_colorspaceChange"
     split_current_file = os.path.splitext(current_file)
     copy_current_file = current_file
@@ -1116,6 +1166,7 @@
 
     # open the file as current project
     hiero.core.openProject(copy_current_file)
+    '''
 
 
 def apply_colorspace_clips():
@@ -1125,10 +1176,8 @@
 
     # get presets for hiero
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
-    from pprint import pprint
 
     presets = imageio.get("regexInputs", {}).get("inputs", {})
-    pprint(presets)
     for clip in clips:
         clip_media_source_path = clip.mediaSource().firstpath()
         clip_name = clip.name()
diff --git a/client/ayon_core/hosts/hiero/api/tags.py b/client/ayon_core/hosts/hiero/api/tags.py
index 5abfee75d0..d4acb23493 100644
--- a/client/ayon_core/hosts/hiero/api/tags.py
+++ b/client/ayon_core/hosts/hiero/api/tags.py
@@ -144,7 +144,7 @@ def add_tags_to_workfile():
     # Get project task types.
     project_name = get_current_project_name()
     project_entity = ayon_api.get_project(project_name)
-    task_types = project_entity["taskType"]
+    task_types = project_entity["taskTypes"]
     nks_pres_tags["[Tasks]"] = {}
     log.debug("__ tasks: {}".format(task_types))
     for task_type in task_types:
diff --git a/client/ayon_core/hosts/hiero/api/workio.py b/client/ayon_core/hosts/hiero/api/workio.py
index 4c2416ca38..6e8fc20172 100644
--- a/client/ayon_core/hosts/hiero/api/workio.py
+++ b/client/ayon_core/hosts/hiero/api/workio.py
@@ -51,13 +51,12 @@ def open_file(filepath):
 
     project = hiero.core.projects()[-1]
 
-    # open project file
-    hiero.core.openProject(filepath.replace(os.path.sep, "/"))
-
-    # close previous project
-    project.close()
-
-
+    # Close the previous project if it's different from the current project.
+ filepath = filepath.replace(os.path.sep, "/") + if project.path().replace(os.path.sep, "/") != filepath: + # open project file + hiero.core.openProject(filepath) + project.close() return True diff --git a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py index 2e97c06bff..7d41979600 100644 --- a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py +++ b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py @@ -24,8 +24,14 @@ class SetDefaultDisplayView(PreLaunchHook): if not OCIO: return + # workfile settings added in '0.2.13' houdini_color_settings = \ - self.data["project_settings"]["houdini"]["imageio"]["workfile"] + self.data["project_settings"]["houdini"]["imageio"].get("workfile") + + if not houdini_color_settings: + self.log.info("Hook 'SetDefaultDisplayView' requires Houdini " + "addon version >= '0.2.13'") + return if not houdini_color_settings["enabled"]: self.log.info( diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py index f65b54a452..1208cfc1ea 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_arnold_rop.py @@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator): # Default extension ext = "exr" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): import hou + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] # Remove the active, we are checking the bypass flag of the nodes instance_data.pop("active", None) @@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator): # Add chunk size attribute instance_data["chunkSize"] = 1 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateArnoldRop, self).create( product_name, @@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator): "ar_exr_half_precision": 1 # half precision } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": ass_filepath = \ "{export_dir}{product_name}/{product_name}.$F4.ass".format( export_dir=hou.text.expandString("$HIP/pyblish/ass/"), @@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target), + ] + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = [ EnumDef("image_format", image_format_enum, default=self.ext, - label="Image Format Options") + label="Image Format Options"), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py index e91ddbc0ac..48cf5057ab 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_karma_rop.py @@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator): product_type = "karma_rop" icon = "magic" + # Default render target + render_target = "farm" + def create(self, product_name, instance_data, pre_create_data): import hou # noqa + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "karma"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateKarmaROP, self).create( product_name, @@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), + attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default="exr", @@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator): decimals=0), BoolDef("cam_res", label="Camera Resolution", - default=False) + default=False), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py index 64ecf428e9..05b4431aba 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_mantra_rop.py @@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator): product_type = "mantra_rop" icon = "magic" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): import hou # noqa + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "ifd"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateMantraROP, self).create( product_name, @@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator): "vm_picture": filepath, } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": ifd_filepath = \ "{export_dir}{product_name}/{product_name}.$F4.ifd".format( export_dir=hou.text.expandString("$HIP/pyblish/ifd/"), @@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator): to_lock = ["productType", "id"] self.lock_parameters(instance_node, to_lock) - def get_pre_create_attr_defs(self): - attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + def get_instance_attr_defs(self): + """get instance attribute definitions. + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + + def get_pre_create_attr_defs(self): image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default="exr", @@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator): label="Override Camera Resolution", tooltip="Override the current camera " "resolution, recommended for IPR.", - default=False) + default=False), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py index 1cd239e929..3ecb09ee9b 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_redshift_rop.py @@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator): ext = "exr" multi_layered_mode = "No Multi-Layered EXR File" - # Default to split export and render jobs - split_render = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "Redshift_ROP"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateRedshiftROP, self).create( product_name, @@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator): rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs" parms["RS_archive_file"] = rs_filepath - if pre_create_data.get("split_render", self.split_render): + if pre_create_data.get("render_target") == "farm_split": parms["RS_archive_enable"] = 1 instance_node.setParms(parms) @@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator): return super(CreateRedshiftROP, self).remove_instances(instances) + def get_instance_attr_defs(self): + """get instance attribute definitions. + + Attributes defined in this method are exposed in + publish tab in the publisher UI. 
+ """ + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + def get_pre_create_attr_defs(self): - attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + image_format_enum = [ "exr", "tif", "jpg", "png", ] + multi_layered_mode = [ "No Multi-Layered EXR File", "Full Multi-Layered EXR File" ] - - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("split_render", - label="Split export and render jobs", - default=self.split_render), + attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + attrs += [ EnumDef("image_format", image_format_enum, default=self.ext, @@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator): EnumDef("multi_layered_mode", multi_layered_mode, default=self.multi_layered_mode, - label="Multi-Layered EXR") + label="Multi-Layered EXR"), ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py index 94dcf23181..f5e4d4ce64 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_review.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py @@ -18,8 +18,11 @@ class CreateReview(plugin.HoudiniCreator): def apply_settings(self, project_settings): super(CreateReview, self).apply_settings(project_settings) - color_settings = project_settings["houdini"]["imageio"]["workfile"] - if color_settings["enabled"]: + # workfile settings added in '0.2.13' + color_settings = project_settings["houdini"]["imageio"].get( + "workfile", {} + ) + if color_settings.get("enabled"): self.review_color_space = color_settings.get("review_color_space") def create(self, product_name, instance_data, pre_create_data): diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py index 5ed9e848a7..9e4633e745 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_vray_rop.py @@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator): icon = "magic" ext = "exr" - # Default to split export and render jobs - export_job = True + # Default render target + render_target = "farm_split" def create(self, product_name, instance_data, pre_create_data): + # Transfer settings from pre create to instance + creator_attributes = instance_data.setdefault( + "creator_attributes", dict()) + for key in ["render_target", "review"]: + if key in pre_create_data: + creator_attributes[key] = pre_create_data[key] instance_data.pop("active", None) instance_data.update({"node_type": "vray_renderer"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - # Submit for job publishing - instance_data["farm"] = pre_create_data.get("farm") instance = super(CreateVrayROP, self).create( product_name, @@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator): "SettingsEXR_bits_per_channel": "16" # half precision } - if pre_create_data.get("export_job"): + if pre_create_data.get("render_target") == "farm_split": scene_filepath = \ 
"{export_dir}{product_name}/{product_name}.$F4.vrscene".format( export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"), @@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator): return super(CreateVrayROP, self).remove_instances(instances) + def get_instance_attr_defs(self): + """get instance attribute definitions. + + Attributes defined in this method are exposed in + publish tab in the publisher UI. + """ + + + render_target_items = { + "local": "Local machine rendering", + "local_no_render": "Use existing frames (local)", + "farm": "Farm Rendering", + "farm_split": "Farm Rendering - Split export & render jobs", + } + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + EnumDef("render_target", + items=render_target_items, + label="Render target", + default=self.render_target) + ] + def get_pre_create_attr_defs(self): - attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() image_format_enum = [ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", "rad", "rat", "rta", "sgi", "tga", "tif", ] - return attrs + [ - BoolDef("farm", - label="Submitting to Farm", - default=True), - BoolDef("export_job", - label="Split export and render jobs", - default=self.export_job), + attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() + + attrs += [ EnumDef("image_format", image_format_enum, default=self.ext, @@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator): "if enabled", default=False) ] + return attrs + self.get_instance_attr_defs() diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py index a958509e25..40a607e81a 100644 --- a/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py +++ b/client/ayon_core/hosts/houdini/plugins/create/create_workfile.py @@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator): # write workfile information to context container. op_ctx = hou.node(CONTEXT_CONTAINER) if not op_ctx: - op_ctx = self.create_context_node() + op_ctx = self.host.create_context_node() workfile_data = {"workfile": current_instance.data_to_store()} imprint(op_ctx, workfile_data) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py index 7fe38555a3..53a3e52717 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_arnold_rop.py @@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "ar_picture") render_products = [] - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("ar_ass_export_enable").eval()) - instance.data["splitRender"] = split_render export_prefix = None export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "ar_ass_file", pad_character="0" ) @@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): "": self.generate_expected_files(instance, beauty_product) } + # Assume it's a multipartExr Render. + multipartExr = True + num_aovs = rop.evalParm("ar_aovs") + # TODO: Check the following logic. + # as it always assumes that all AOV are not merged. 
for index in range(1, num_aovs + 1): # Skip disabled AOVs if not rop.evalParm("ar_enable_aov{}".format(index)): @@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): files_by_aov[label] = self.generate_expected_files(instance, aov_product) + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. + instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: {}".format(product)) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py index 040ad68a1a..e931c7bf1b 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py @@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib class CollectDataforCache(pyblish.api.InstancePlugin): """Collect data for caching to Deadline.""" - order = pyblish.api.CollectorOrder + 0.04 + # Run after Collect Frames + order = pyblish.api.CollectorOrder + 0.11 families = ["ass", "pointcache", "mantraifd", "redshiftproxy", "vdbcache"] diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py new file mode 100644 index 0000000000..586aa2da57 --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_farm_instances.py @@ -0,0 +1,35 @@ +import pyblish.api + + +class CollectFarmInstances(pyblish.api.InstancePlugin): + """Collect instances for farm render.""" + + order = pyblish.api.CollectorOrder + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + hosts = ["houdini"] + targets = ["local", "remote"] + label = "Collect farm instances" + + def process(self, instance): + + creator_attribute = instance.data["creator_attributes"] + + # Collect Render Target + if creator_attribute.get("render_target") not in { + "farm_split", "farm" + }: + instance.data["farm"] = False + instance.data["splitRender"] = False + self.log.debug("Render on farm is disabled. 
" + "Skipping farm collecting.") + return + + instance.data["farm"] = True + instance.data["splitRender"] = ( + creator_attribute.get("render_target") == "farm_split" + ) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py index a643ab0d38..b38ebc6e2f 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py @@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin): label = "Collect Frames" families = ["vdbcache", "imagesequence", "ass", "mantraifd", "redshiftproxy", "review", - "bgeo"] + "pointcache"] def process(self, instance): diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py index 78651b0c69..662ed7ae30 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_karma_rop.py @@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin): beauty_product) } + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. + # By default karma render is a multipart Exr. + instance.data["multipartExr"] = True + filenames = list(render_products) instance.data["files"] = filenames instance.data["renderProducts"] = colorspace.ARenderProduct() diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py new file mode 100644 index 0000000000..474002e1ee --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_local_render_instances.py @@ -0,0 +1,137 @@ +import os +import pyblish.api +from ayon_core.pipeline.create import get_product_name +from ayon_core.pipeline.farm.patterning import match_aov_pattern +from ayon_core.pipeline.publish import ( + get_plugin_settings, + apply_plugin_settings_automatically +) + + +class CollectLocalRenderInstances(pyblish.api.InstancePlugin): + """Collect instances for local render. + + Agnostic Local Render Collector. + """ + + # this plugin runs after Collect Render Products + order = pyblish.api.CollectorOrder + 0.12 + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + hosts = ["houdini"] + label = "Collect local render instances" + + use_deadline_aov_filter = False + aov_filter = {"host_name": "houdini", + "value": [".*([Bb]eauty).*"]} + + @classmethod + def apply_settings(cls, project_settings): + # Preserve automatic settings applying logic + settings = get_plugin_settings(plugin=cls, + project_settings=project_settings, + log=cls.log, + category="houdini") + apply_plugin_settings_automatically(cls, settings, logger=cls.log) + + if not cls.use_deadline_aov_filter: + # get aov_filter from collector settings + # and restructure it as match_aov_pattern requires. + cls.aov_filter = { + cls.aov_filter["host_name"]: cls.aov_filter["value"] + } + else: + # get aov_filter from deadline settings + cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"] + cls.aov_filter = { + item["name"]: item["value"] + for item in cls.aov_filter + } + + def process(self, instance): + + if instance.data["farm"]: + self.log.debug("Render on farm is enabled. 
" + "Skipping local render collecting.") + return + + # Create Instance for each AOV. + context = instance.context + expectedFiles = next(iter(instance.data["expectedFiles"]), {}) + + product_type = "render" # is always render + product_group = get_product_name( + context.data["projectName"], + context.data["taskEntity"]["name"], + context.data["taskEntity"]["taskType"], + context.data["hostName"], + product_type, + instance.data["productName"] + ) + + for aov_name, aov_filepaths in expectedFiles.items(): + product_name = product_group + + if aov_name: + product_name = "{}_{}".format(product_name, aov_name) + + # Create instance for each AOV + aov_instance = context.create_instance(product_name) + + # Prepare Representation for each AOV + aov_filenames = [os.path.basename(path) for path in aov_filepaths] + staging_dir = os.path.dirname(aov_filepaths[0]) + ext = aov_filepaths[0].split(".")[-1] + + # Decide if instance is reviewable + preview = False + if instance.data.get("multipartExr", False): + # Add preview tag because its multipartExr. + preview = True + else: + # Add Preview tag if the AOV matches the filter. + preview = match_aov_pattern( + "houdini", self.aov_filter, aov_filenames[0] + ) + + preview = preview and instance.data.get("review", False) + + # Support Single frame. + # The integrator wants single files to be a single + # filename instead of a list. + # More info: https://github.com/ynput/ayon-core/issues/238 + if len(aov_filenames) == 1: + aov_filenames = aov_filenames[0] + + aov_instance.data.update({ + # 'label': label, + "task": instance.data["task"], + "folderPath": instance.data["folderPath"], + "frameStart": instance.data["frameStartHandle"], + "frameEnd": instance.data["frameEndHandle"], + "productType": product_type, + "family": product_type, + "productName": product_name, + "productGroup": product_group, + "families": ["render.local.hou", "review"], + "instance_node": instance.data["instance_node"], + "representations": [ + { + "stagingDir": staging_dir, + "ext": ext, + "name": ext, + "tags": ["review"] if preview else [], + "files": aov_filenames, + "frameStart": instance.data["frameStartHandle"], + "frameEnd": instance.data["frameEndHandle"] + } + ] + }) + + # Skip integrating original render instance. + # We are not removing it because it's used to trigger the render. + instance.data["integrate"] = False diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py index df9acc4b61..7b247768fc 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_mantra_rop.py @@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "vm_picture") render_products = [] - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("soho_outputmode").eval()) - instance.data["splitRender"] = split_render export_prefix = None export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "soho_diskfile", pad_character="0" ) @@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): beauty_product) } + # Assume it's a multipartExr Render. 
+ multipartExr = True + + # TODO: This logic doesn't take into considerations + # cryptomatte defined in 'Images > Cryptomatte' aov_numbers = rop.evalParm("vm_numaux") if aov_numbers > 0: # get the filenames of the AOVs @@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. + instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: %s" % product) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py index 55a55bb12a..ce90ae2413 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix") beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix") - # Store whether we are splitting the render job (export + render) - split_render = bool(rop.parm("RS_archive_enable").eval()) - instance.data["splitRender"] = split_render + export_products = [] - if split_render: + if instance.data["splitRender"]: export_prefix = evalParmNoFrame( rop, "RS_archive_file", pad_character="0" ) @@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2") if full_exr_mode: # Ignore beauty suffix if full mode is enabled - # As this is what the rop does. + # As this is what the rop does. beauty_suffix = "" + # Assume it's a multipartExr Render. + multipartExr = True + # Default beauty/main layer AOV beauty_product = self.get_render_product_name( prefix=default_prefix, suffix=beauty_suffix @@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): beauty_suffix: self.generate_expected_files(instance, beauty_product) } - + aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode() if aovs_rop: rop = aovs_rop @@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \ not full_exr_mode: - + aov_product = self.get_render_product_name(aov_prefix, aov_suffix) render_products.append(aov_product) files_by_aov[aov_suffix] = self.generate_expected_files(instance, aov_product) # noqa + # Set to False as soon as we have a separated aov. + multipartExr = False + + # Review Logic expects this key to exist and be True + # if render is a multipart Exr. + # As long as we have one AOV then multipartExr should be True. 
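Each of these collectors derives `multipartExr` the same way: start from True and flip to False the moment any AOV gets its own product beside the beauty output. That reduction amounts to a single check; `is_multipart_exr` below is a hypothetical helper restating the rule, not part of the patch:

```python
def is_multipart_exr(files_by_aov):
    """Return True while the render writes one multichannel file."""
    # Only the beauty product present -> a single multipart EXR.
    return len(files_by_aov) <= 1


assert is_multipart_exr({"beauty": ["render.0001.exr"]})
assert not is_multipart_exr({
    "beauty": ["render.0001.exr"],
    "diffuse": ["diffuse.0001.exr"],
})
```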
+ instance.data["multipartExr"] = multipartExr + for product in render_products: self.log.debug("Found render product: %s" % product) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py index 9671945b9a..ed2de785a2 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_review_data.py @@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin): label = "Collect Review Data" # This specific order value is used so that # this plugin runs after CollectRopFrameRange - order = pyblish.api.CollectorOrder + 0.1 + # Also after CollectLocalRenderInstances + order = pyblish.api.CollectorOrder + 0.13 hosts = ["houdini"] families = ["review"] @@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin): ropnode_path = instance.data["instance_node"] ropnode = hou.node(ropnode_path) - camera_path = ropnode.parm("camera").eval() + # Get camera based on the instance_node type. + camera_path = self._get_camera_path(ropnode) camera_node = hou.node(camera_path) if not camera_node: self.log.warning("No valid camera node found on review node: " @@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin): # Store focal length in `burninDataMembers` burnin_members = instance.data.setdefault("burninDataMembers", {}) burnin_members["focalLength"] = focal_length + + def _get_camera_path(self, ropnode): + """Get the camera path associated with the given rop node. + + This function evaluates the camera parameter according to the + type of the given rop node. + + Returns: + Union[str, None]: Camera path or None. + + This function can return empty string if the camera + path is empty i.e. no camera path. + """ + + if ropnode.type().name() in { + "opengl", "karma", "ifd", "arnold" + }: + return ropnode.parm("camera").eval() + + elif ropnode.type().name() == "Redshift_ROP": + return ropnode.parm("RS_renderCamera").eval() + + elif ropnode.type().name() == "vray_renderer": + return ropnode.parm("render_camera").eval() + + return None diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py new file mode 100644 index 0000000000..78dc5fe11a --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_reviewable_instances.py @@ -0,0 +1,22 @@ +import pyblish.api + + +class CollectReviewableInstances(pyblish.api.InstancePlugin): + """Collect Reviewable Instances. 
+
+    Collects all instances of the listed render families and stores
+    whether their creator_attributes["review"] toggle is enabled.
+    """
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Reviewable Instances"
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+
+        instance.data["review"] = creator_attribute.get("review", False)
diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
index 62b7dcdd5d..c39b1db103 100644
--- a/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
+++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_vray_rop.py
@@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element
-        # Store whether we are splitting the render job in an export + render
-        split_render = rop.parm("render_export_mode").eval() == "2"
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "render_export_filepath", pad_character="0"
             )
@@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                                      "": self.generate_expected_files(instance,
                                                                       beauty_product)}
 
+        # Assume it's a multipartExr render.
+        multipartExr = True
+
         if instance.data.get("RenderElement", True):
             render_element = self.get_render_element_name(rop, default_prefix)
             if render_element:
@@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                     render_products.append(renderpass)
                     files_by_aov[aov] = self.generate_expected_files(
                         instance, renderpass)
+                    # Set to False as soon as any AOV gets its own product.
+                    multipartExr = False
 
+        # Review logic expects this key to exist and be True
+        # if the render is a multipart EXR. It stays True only while
+        # all channels render into the single beauty file.
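Several of these plugins walk the same `expectedFiles` structure: a list holding a single dict that maps each AOV name to either one path or a list of paths. A small traversal sketch matching the comments in the new extractor further below:

```python
def iter_expected_frames(expected_files):
    """Yield every expected output path, AOV by AOV."""
    # `expectedFiles` is a list that includes one dict.
    aovs = next(iter(expected_files), {})
    for value in aovs.values():
        # Each value is a single path (str) or a list of paths.
        if isinstance(value, str):
            yield value
        else:
            yield from value


frames = list(iter_expected_frames([{
    "beauty": ["/tmp/render.0001.exr", "/tmp/render.0002.exr"],
    "diffuse": "/tmp/diffuse.exr",
}]))
assert len(frames) == 3
```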
+ instance.data["multipartExr"] = multipartExr for product in render_products: self.log.debug("Found render product: %s" % product) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py index daf30b26ed..7ae476d2b4 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py @@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor): staging_dir = os.path.dirname(output) instance.data["stagingDir"] = staging_dir - file_name = os.path.basename(output) + if instance.data.get("frames"): + # list of files + files = instance.data["frames"] + else: + # single file + files = os.path.basename(output) # We run the render - self.log.info("Writing alembic '%s' to '%s'" % (file_name, + self.log.info("Writing alembic '%s' to '%s'" % (files, staging_dir)) render_rop(ropnode) @@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor): representation = { 'name': 'abc', 'ext': 'abc', - 'files': file_name, + 'files': files, "stagingDir": staging_dir, } instance.data["representations"].append(representation) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py index 57bb8b881a..26a216e335 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py @@ -19,6 +19,16 @@ class ExtractOpenGL(publish.Extractor, def process(self, instance): ropnode = hou.node(instance.data.get("instance_node")) + # This plugin is triggered when marking render as reviewable. + # Therefore, this plugin will run on over wrong instances. + # TODO: Don't run this plugin on wrong instances. + # This plugin should run only on review product type + # with instance node of opengl type. + if ropnode.type().name() != "opengl": + self.log.debug("Skipping OpenGl extraction. Rop node {} " + "is not an OpenGl node.".format(ropnode.path())) + return + output = ropnode.evalParm("picture") staging_dir = os.path.normpath(os.path.dirname(output)) instance.data["stagingDir"] = staging_dir diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py new file mode 100644 index 0000000000..7b4762a25f --- /dev/null +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_render.py @@ -0,0 +1,74 @@ +import pyblish.api + +from ayon_core.pipeline import publish +from ayon_core.hosts.houdini.api.lib import render_rop +import hou +import os + + +class ExtractRender(publish.Extractor): + + order = pyblish.api.ExtractorOrder + label = "Extract Render" + hosts = ["houdini"] + families = ["mantra_rop", + "karma_rop", + "redshift_rop", + "arnold_rop", + "vray_rop"] + + def process(self, instance): + creator_attribute = instance.data["creator_attributes"] + product_type = instance.data["productType"] + rop_node = hou.node(instance.data.get("instance_node")) + + # Align split parameter value on rop node to the render target. 
+ if instance.data["splitRender"]: + if product_type == "arnold_rop": + rop_node.setParms({"ar_ass_export_enable": 1}) + elif product_type == "mantra_rop": + rop_node.setParms({"soho_outputmode": 1}) + elif product_type == "redshift_rop": + rop_node.setParms({"RS_archive_enable": 1}) + elif product_type == "vray_rop": + rop_node.setParms({"render_export_mode": "2"}) + else: + if product_type == "arnold_rop": + rop_node.setParms({"ar_ass_export_enable": 0}) + elif product_type == "mantra_rop": + rop_node.setParms({"soho_outputmode": 0}) + elif product_type == "redshift_rop": + rop_node.setParms({"RS_archive_enable": 0}) + elif product_type == "vray_rop": + rop_node.setParms({"render_export_mode": "1"}) + + if instance.data.get("farm"): + self.log.debug("Render should be processed on farm, skipping local render.") + return + + if creator_attribute.get("render_target") == "local": + ropnode = hou.node(instance.data.get("instance_node")) + render_rop(ropnode) + + # `ExpectedFiles` is a list that includes one dict. + expected_files = instance.data["expectedFiles"][0] + # Each key in that dict is a list of files. + # Combine lists of files into one big list. + all_frames = [] + for value in expected_files.values(): + if isinstance(value, str): + all_frames.append(value) + elif isinstance(value, list): + all_frames.extend(value) + # Check missing frames. + # Frames won't exist if user cancels the render. + missing_frames = [ + frame + for frame in all_frames + if not os.path.exists(frame) + ] + if missing_frames: + # TODO: Use user friendly error reporting. + raise RuntimeError("Failed to complete render extraction. " + "Missing output files: {}".format( + missing_frames)) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py index fe8fa25f10..3e9291d5c2 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/increment_current_file.py @@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder + 9.0 hosts = ["houdini"] families = ["workfile", - "redshift_rop", - "arnold_rop", + "usdrender", "mantra_rop", "karma_rop", - "usdrender", + "redshift_rop", + "arnold_rop", + "vray_rop", + "render.local.hou", "publish.hou"] optional = True diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py index fdf03d5cba..91bd36018a 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -import sys +import hou import pyblish.api -import six from ayon_core.pipeline import PublishValidationError @@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise PublishValidationError( - ("Output node(s) `{}` are incorrect. " - "See plug-in log for details.").format(invalid), - title=self.label + "Output node '{}' is incorrect. " + "See plug-in log for details.".format(invalid), + title=self.label, + description=( + "### Invalid COP output node\n\n" + "The output node path for the instance must be set to a " + "valid COP node path.\n\nSee the log for more details." 
+ ) ) @classmethod def get_invalid(cls, instance): + output_node = instance.data.get("output_node") - import hou - - try: - output_node = instance.data["output_node"] - except KeyError: - six.reraise( - PublishValidationError, - PublishValidationError( - "Can't determine COP output node.", - title=cls.__name__), - sys.exc_info()[2] - ) - - if output_node is None: + if not output_node: node = hou.node(instance.data.get("instance_node")) cls.log.error( "COP Output node in '%s' does not exist. " @@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): cls.log.error( "Output node %s is not a COP node. " "COP Path must point to a COP node, " - "instead found category type: %s" - % (output_node.path(), output_node.type().category().name()) + "instead found category type: %s", + output_node.path(), output_node.type().category().name() ) return [output_node.path()] @@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): # is Cop2 to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category if output_node.type().category().name() != "Cop2": - raise PublishValidationError( - ( - "Output node {} is not of category Cop2." - " This is a bug..." - ).format(output_node.path()), - title=cls.label) + cls.log.error( + "Output node %s is not of category Cop2.", output_node.path() + ) + return [output_node.path()] diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py index d3afa83b67..fa532c5437 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py @@ -45,14 +45,29 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin, category="houdini") apply_plugin_settings_automatically(cls, settings, logger=cls.log) + # workfile settings added in '0.2.13' + color_settings = project_settings["houdini"]["imageio"].get( + "workfile", {} + ) # Add review color settings - color_settings = project_settings["houdini"]["imageio"]["workfile"] - if color_settings["enabled"]: + if color_settings.get("enabled"): cls.review_color_space = color_settings.get("review_color_space") def process(self, instance): + rop_node = hou.node(instance.data["instance_node"]) + + # This plugin is triggered when marking render as reviewable. + # Therefore, this plugin will run on over wrong instances. + # TODO: Don't run this plugin on wrong instances. + # This plugin should run only on review product type + # with instance node of opengl type. + if rop_node.type().name() != "opengl": + self.log.debug("Skipping Validation. 
Rop node {} " + "is not an OpenGl node.".format(rop_node.path())) + return + if not self.is_active(instance.data): return @@ -63,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin, ) return - rop_node = hou.node(instance.data["instance_node"]) if rop_node.evalParm("colorcorrect") != 2: # any colorspace settings other than default requires # 'Color Correct' parm to be set to 'OpenColorIO' diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py index b6007d3f0f..0b09306b0d 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_scene_review.py @@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin): report = [] instance_node = hou.node(instance.data.get("instance_node")) + # This plugin is triggered when marking render as reviewable. + # Therefore, this plugin will run on over wrong instances. + # TODO: Don't run this plugin on wrong instances. + # This plugin should run only on review product type + # with instance node of opengl type. + if instance_node.type().name() != "opengl": + self.log.debug("Skipping Validation. Rop node {} " + "is not an OpenGl node.".format(instance_node.path())) + return + invalid = self.get_invalid_scene_path(instance_node) if invalid: report.append(invalid) diff --git a/client/ayon_core/hosts/houdini/startup/OPmenu.xml b/client/ayon_core/hosts/houdini/startup/OPmenu.xml new file mode 100644 index 0000000000..0a7b265fa1 --- /dev/null +++ b/client/ayon_core/hosts/houdini/startup/OPmenu.xml @@ -0,0 +1,29 @@ + + + + + + + + opmenu.unsynchronize + + opmenu.vhda_create + + + + + + + + + + diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py index 02b099b3ff..f20f754248 100644 --- a/client/ayon_core/hosts/max/api/lib.py +++ b/client/ayon_core/hosts/max/api/lib.py @@ -6,12 +6,9 @@ import json from typing import Any, Dict, Union import six -import ayon_api from ayon_core.pipeline import ( get_current_project_name, - get_current_folder_path, - get_current_task_name, colorspace ) from ayon_core.settings import get_project_settings @@ -372,12 +369,8 @@ def reset_colorspace(): """ if int(get_max_version()) < 2024: return - project_name = get_current_project_name() - colorspace_mgr = rt.ColorPipelineMgr - project_settings = get_project_settings(project_name) - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) + max_config_data = colorspace.get_current_context_imageio_config_preset() if max_config_data: ocio_config_path = max_config_data["path"] colorspace_mgr = rt.ColorPipelineMgr @@ -392,10 +385,7 @@ def check_colorspace(): "because Max main window can't be found.") if int(get_max_version()) >= 2024: color_mgr = rt.ColorPipelineMgr - project_name = get_current_project_name() - project_settings = get_project_settings(project_name) - max_config_data = colorspace.get_imageio_config( - project_name, "max", project_settings) + max_config_data = colorspace.get_current_context_imageio_config_preset() if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"): if not is_headless(): from ayon_core.tools.utils import SimplePopup @@ -496,9 +486,9 @@ def object_transform_set(container_children): """ transform_set = {} for node in container_children: - name = f"{node.name}.transform" + name = f"{node}.transform" transform_set[name] = node.pos - name = f"{node.name}.scale" + name = 
f"{node}.scale" transform_set[name] = node.scale return transform_set @@ -519,6 +509,36 @@ def get_plugins() -> list: return plugin_info_list +def update_modifier_node_names(event, node): + """Update the name of the nodes after renaming + + Args: + event (pymxs.MXSWrapperBase): Event Name ( + Mandatory argument for rt.NodeEventCallback) + node (list): Event Number ( + Mandatory argument for rt.NodeEventCallback) + + """ + containers = [ + obj + for obj in rt.Objects + if ( + rt.ClassOf(obj) == rt.Container + and rt.getUserProp(obj, "id") == "pyblish.avalon.instance" + and rt.getUserProp(obj, "productType") not in { + "workfile", "tyflow" + } + ) + ] + if not containers: + return + for container in containers: + ayon_data = container.modifiers[0].openPypeData + updated_node_names = [str(node.node) for node + in ayon_data.all_handles] + rt.setProperty(ayon_data, "sel_list", updated_node_names) + + @contextlib.contextmanager def render_resolution(width, height): """Set render resolution option during context diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py index 675f36c24f..d9cfc3407f 100644 --- a/client/ayon_core/hosts/max/api/pipeline.py +++ b/client/ayon_core/hosts/max/api/pipeline.py @@ -52,17 +52,15 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): self._has_been_setup = True - def context_setting(): - return lib.set_context_setting() - - rt.callbacks.addScript(rt.Name('systemPostNew'), - context_setting) + rt.callbacks.addScript(rt.Name('systemPostNew'), on_new) rt.callbacks.addScript(rt.Name('filePostOpen'), lib.check_colorspace) rt.callbacks.addScript(rt.Name('postWorkspaceChange'), self._deferred_menu_creation) + rt.NodeEventCallback( + nameChanged=lib.update_modifier_node_names) def workfile_has_unsaved_changes(self): return rt.getSaveRequired() @@ -161,6 +159,14 @@ def ls() -> list: yield lib.read(container) +def on_new(): + lib.set_context_setting() + if rt.checkForSave(): + rt.resetMaxFile(rt.Name("noPrompt")) + rt.clearUndoBuffer() + rt.redrawViews() + + def containerise(name: str, nodes: list, context, namespace=None, loader=None, suffix="_CON"): data = { diff --git a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py index 4f982dd5ba..97b8c6cd52 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py +++ b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py @@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin): ) for max_obj, obj_name in zip(max_objects, max_object_names): max_obj.name = f"{namespace}:{obj_name}" - max_container.append(rt.getNodeByName(max_obj.name)) + max_container.append(max_obj) return containerise( name, max_container, context, namespace, loader=self.__class__.__name__) @@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin): current_max_object_names): max_obj.name = f"{namespace}:{obj_name}" max_objects.append(max_obj) - max_transform = f"{max_obj.name}.transform" + max_transform = f"{max_obj}.transform" if max_transform in transform_data.keys(): max_obj.pos = transform_data[max_transform] or 0 max_obj.scale = transform_data[ - f"{max_obj.name}.scale"] or 0 + f"{max_obj}.scale"] or 0 update_custom_attribute_data(node, max_objects) lib.imprint(container["instance_node"], { diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py index 82cad71c3e..6f5de20ae0 100644 --- 
a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py @@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin): for fbx_object in current_fbx_objects: fbx_object.name = f"{namespace}:{fbx_object.name}" fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object.name}.transform" + fbx_transform = f"{fbx_object}.transform" if fbx_transform in transform_data.keys(): fbx_object.pos = transform_data[fbx_transform] or 0 fbx_object.scale = transform_data[ - f"{fbx_object.name}.scale"] or 0 + f"{fbx_object}.scale"] or 0 with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py index 38f2cdf43c..a9119259df 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py @@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin): selections = rt.GetCurrentSelection() for selection in selections: selection.name = f"{namespace}:{selection.name}" - selection_transform = f"{selection.name}.transform" + selection_transform = f"{selection}.transform" if selection_transform in transform_data.keys(): selection.pos = transform_data[selection_transform] or 0 selection.scale = transform_data[ - f"{selection.name}.scale"] or 0 + f"{selection}.scale"] or 0 update_custom_attribute_data(node, selections) with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py index 2b946eb2aa..2ed5d64a18 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py @@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin): for children in asset.Children: children.name = f"{namespace}:{children.name}" usd_objects.append(children) - children_transform = f"{children.name}.transform" + children_transform = f"{children}.transform" if children_transform in transform_data.keys(): children.pos = transform_data[children_transform] or 0 children.scale = transform_data[ - f"{children.name}.scale"] or 0 + f"{children}.scale"] or 0 asset.name = f"{namespace}:{asset.name}" usd_objects.append(asset) diff --git a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py index 2efb7c7f62..47690f84e9 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py @@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin): abc.Parent = container abc.name = f"{namespace}:{abc.name}" ox_abc_objects.append(abc) - ox_transform = f"{abc.name}.transform" + ox_transform = f"{abc}.transform" if ox_transform in transform_data.keys(): abc.pos = transform_data[ox_transform] or 0 - abc.scale = transform_data[f"{abc.name}.scale"] or 0 + abc.scale = transform_data[f"{abc}.scale"] or 0 update_custom_attribute_data(node, ox_abc_objects) lib.imprint( container["instance_node"], diff --git a/client/ayon_core/hosts/max/startup/startup.ms b/client/ayon_core/hosts/max/startup/startup.ms index 2dfe53a6a5..c5b4f0e526 100644 --- a/client/ayon_core/hosts/max/startup/startup.ms +++ b/client/ayon_core/hosts/max/startup/startup.ms @@ -12,4 +12,4 @@ max create mode python.ExecuteFile startup -) \ No newline at end of file +) diff --git 
a/client/ayon_core/hosts/maya/api/alembic.py b/client/ayon_core/hosts/maya/api/alembic.py new file mode 100644 index 0000000000..6bd00e1cb1 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/alembic.py @@ -0,0 +1,350 @@ +import json +import logging +import os + +from maya import cmds # noqa + +from ayon_core.hosts.maya.api.lib import evaluation + +log = logging.getLogger(__name__) + +# The maya alembic export types +ALEMBIC_ARGS = { + "attr": (list, tuple), + "attrPrefix": (list, tuple), + "autoSubd": bool, + "dataFormat": str, + "endFrame": float, + "eulerFilter": bool, + "frameRange": str, # "start end"; overrides startFrame & endFrame + "frameRelativeSample": float, + "melPerFrameCallback": str, + "melPostJobCallback": str, + "noNormals": bool, + "preRoll": bool, + "pythonPerFrameCallback": str, + "pythonPostJobCallback": str, + "renderableOnly": bool, + "root": (list, tuple), + "selection": bool, + "startFrame": float, + "step": float, + "stripNamespaces": bool, + "userAttr": (list, tuple), + "userAttrPrefix": (list, tuple), + "uvWrite": bool, + "uvsOnly": bool, + "verbose": bool, + "wholeFrameGeo": bool, + "worldSpace": bool, + "writeColorSets": bool, + "writeCreases": bool, # Maya 2015 Ext1+ + "writeFaceSets": bool, + "writeUVSets": bool, # Maya 2017+ + "writeVisibility": bool, +} + + +def extract_alembic( + file, + attr=None, + attrPrefix=None, + dataFormat="ogawa", + endFrame=None, + eulerFilter=True, + frameRange="", + melPerFrameCallback=None, + melPostJobCallback=None, + noNormals=False, + preRoll=False, + preRollStartFrame=0, + pythonPerFrameCallback=None, + pythonPostJobCallback=None, + renderableOnly=False, + root=None, + selection=True, + startFrame=None, + step=1.0, + stripNamespaces=True, + userAttr=None, + userAttrPrefix=None, + uvsOnly=False, + uvWrite=True, + verbose=False, + wholeFrameGeo=False, + worldSpace=False, + writeColorSets=False, + writeCreases=False, + writeFaceSets=False, + writeUVSets=False, + writeVisibility=False +): + """Extract a single Alembic Cache. + + This extracts an Alembic cache using the `-selection` flag to minimize + the extracted content to solely what was Collected into the instance. + + Arguments: + file (str): The filepath to write the alembic file to. + + attr (list of str, optional): A specific geometric attribute to write + out. Defaults to []. + + attrPrefix (list of str, optional): Prefix filter for determining which + geometric attributes to write out. Defaults to ["ABC_"]. + + dataFormat (str): The data format to use for the cache, + defaults to "ogawa" + + endFrame (float): End frame of output. Ignored if `frameRange` + provided. + + eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with + an Euler filter. Euler filtering helps resolve irregularities in + rotations especially if X, Y, and Z rotations exceed 360 degrees. + Defaults to True. + + frameRange (tuple or str): Two-tuple with start and end frame or a + string formatted as: "startFrame endFrame". This argument + overrides `startFrame` and `endFrame` arguments. + + melPerFrameCallback (Optional[str]): MEL callback run per frame. + + melPostJobCallback (Optional[str]): MEL callback after last frame is + written. + + noNormals (bool): When on, normal data from the original polygon + objects is not included in the exported Alembic cache file. + + preRoll (bool): This frame range will not be sampled. + Defaults to False. + + preRollStartFrame (float): The frame to start scene + evaluation at. 
This is used to set the starting frame for time + dependent translations and can be used to evaluate run-up that + isn't actually translated. Defaults to 0. + + pythonPerFrameCallback (Optional[str]): Python callback run per frame. + + pythonPostJobCallback (Optional[str]): Python callback after last frame + is written. + + renderableOnly (bool): When on, any non-renderable nodes or hierarchy, + such as hidden objects, are not included in the Alembic file. + Defaults to False. + + root (list of str): Maya dag path which will be parented to + the root of the Alembic file. Defaults to [], which means the + entire scene will be written out. + + selection (bool): Write out all all selected nodes from the + active selection list that are descendents of the roots specified + with -root. Defaults to False. + + startFrame (float): Start frame of output. Ignored if `frameRange` + provided. + + step (float): The time interval (expressed in frames) at + which the frame range is sampled. Additional samples around each + frame can be specified with -frs. Defaults to 1.0. + + stripNamespaces (bool): When on, any namespaces associated with the + exported objects are removed from the Alembic file. For example, an + object with the namespace taco:foo:bar appears as bar in the + Alembic file. + + userAttr (list of str, optional): A specific user defined attribute to + write out. Defaults to []. + + userAttrPrefix (list of str, optional): Prefix filter for determining + which user defined attributes to write out. Defaults to []. + + uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes + will be written to the Alembic file. + + uvWrite (bool): When on, UV data from polygon meshes and subdivision + objects are written to the Alembic file. Only the current UV map is + included. + + verbose (bool): When on, outputs frame number information to the + Script Editor or output window during extraction. + + wholeFrameGeo (bool): Data for geometry will only be written + out on whole frames. Defaults to False. + + worldSpace (bool): When on, the top node in the node hierarchy is + stored as world space. By default, these nodes are stored as local + space. Defaults to False. + + writeColorSets (bool): Write all color sets on MFnMeshes as + color 3 or color 4 indexed geometry parameters with face varying + scope. Defaults to False. + + writeCreases (bool): If the mesh has crease edges or crease + vertices, the mesh (OPolyMesh) would now be written out as an OSubD + and crease info will be stored in the Alembic file. Otherwise, + creases info won't be preserved in Alembic file unless a custom + Boolean attribute SubDivisionMesh has been added to mesh node and + its value is true. Defaults to False. + + writeFaceSets (bool): Write all Face sets on MFnMeshes. + Defaults to False. + + writeUVSets (bool): Write all uv sets on MFnMeshes as vector + 2 indexed geometry parameters with face varying scope. Defaults to + False. + + writeVisibility (bool): Visibility state will be stored in + the Alembic file. Otherwise everything written out is treated as + visible. Defaults to False. + """ + + # Ensure alembic exporter is loaded + cmds.loadPlugin('AbcExport', quiet=True) + + # Alembic Exporter requires forward slashes + file = file.replace('\\', '/') + + # Ensure list arguments are valid. 
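For orientation, a minimal usage sketch of the new wrapper; the node and output paths are hypothetical:

```python
from maya import cmds
from ayon_core.hosts.maya.api.alembic import extract_alembic

# Hypothetical scene: export a selected hierarchy over an explicit range.
cmds.select("|char_GRP", replace=True)
extract_alembic(
    file="/tmp/char_GRP.abc",   # backslashes are normalized internally
    root=["|char_GRP"],         # parented to the Alembic root
    frameRange="1001 1100",     # overrides startFrame/endFrame
    worldSpace=True,
    writeUVSets=True,
)

# Omitting frameRange (and startFrame/endFrame) falls back to the
# current playback range queried via cmds.playbackOptions().
```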
+ attr = attr or [] + attrPrefix = attrPrefix or [] + userAttr = userAttr or [] + userAttrPrefix = userAttrPrefix or [] + root = root or [] + + # Pass the start and end frame on as `frameRange` so that it + # never conflicts with that argument + if not frameRange: + # Fallback to maya timeline if no start or end frame provided. + if startFrame is None: + startFrame = cmds.playbackOptions(query=True, + animationStartTime=True) + if endFrame is None: + endFrame = cmds.playbackOptions(query=True, + animationEndTime=True) + + # Ensure valid types are converted to frame range + assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"]) + assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"]) + frameRange = "{0} {1}".format(startFrame, endFrame) + else: + # Allow conversion from tuple for `frameRange` + if isinstance(frameRange, (list, tuple)): + assert len(frameRange) == 2 + frameRange = "{0} {1}".format(frameRange[0], frameRange[1]) + + # Assemble options + options = { + "selection": selection, + "frameRange": frameRange, + "eulerFilter": eulerFilter, + "noNormals": noNormals, + "preRoll": preRoll, + "root": root, + "renderableOnly": renderableOnly, + "uvWrite": uvWrite, + "uvsOnly": uvsOnly, + "writeColorSets": writeColorSets, + "writeFaceSets": writeFaceSets, + "wholeFrameGeo": wholeFrameGeo, + "worldSpace": worldSpace, + "writeVisibility": writeVisibility, + "writeUVSets": writeUVSets, + "writeCreases": writeCreases, + "dataFormat": dataFormat, + "step": step, + "attr": attr, + "attrPrefix": attrPrefix, + "userAttr": userAttr, + "userAttrPrefix": userAttrPrefix, + "stripNamespaces": stripNamespaces, + "verbose": verbose + } + + # Validate options + for key, value in options.copy().items(): + + # Discard unknown options + if key not in ALEMBIC_ARGS: + log.warning("extract_alembic() does not support option '%s'. 
" + "Flag will be ignored..", key) + options.pop(key) + continue + + # Validate value type + valid_types = ALEMBIC_ARGS[key] + if not isinstance(value, valid_types): + raise TypeError("Alembic option unsupported type: " + "{0} (expected {1})".format(value, valid_types)) + + # Ignore empty values, like an empty string, since they mess up how + # job arguments are built + if isinstance(value, (list, tuple)): + value = [x for x in value if x.strip()] + + # Ignore option completely if no values remaining + if not value: + options.pop(key) + continue + + options[key] = value + + # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ + maya_version = int(cmds.about(version=True)) + if maya_version >= 2018: + options['autoSubd'] = options.pop('writeCreases', False) + + # Only add callbacks if they are set so that we're not passing `None` + callbacks = { + "melPerFrameCallback": melPerFrameCallback, + "melPostJobCallback": melPostJobCallback, + "pythonPerFrameCallback": pythonPerFrameCallback, + "pythonPostJobCallback": pythonPostJobCallback, + } + for key, callback in callbacks.items(): + if callback: + options[key] = str(callback) + + # Format the job string from options + job_args = list() + for key, value in options.items(): + if isinstance(value, (list, tuple)): + for entry in value: + job_args.append("-{} {}".format(key, entry)) + elif isinstance(value, bool): + # Add only when state is set to True + if value: + job_args.append("-{0}".format(key)) + else: + job_args.append("-{0} {1}".format(key, value)) + + job_str = " ".join(job_args) + job_str += ' -file "%s"' % file + + # Ensure output directory exists + parent_dir = os.path.dirname(file) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + if verbose: + log.debug("Preparing Alembic export with options: %s", + json.dumps(options, indent=4)) + log.debug("Extracting Alembic with job arguments: %s", job_str) + + # Perform extraction + print("Alembic Job Arguments : {}".format(job_str)) + + # Disable the parallel evaluation temporarily to ensure no buggy + # exports are made. 
(PLN-31) + # TODO: Make sure this actually fixes the issues + with evaluation("off"): + cmds.AbcExport( + j=job_str, + verbose=verbose, + preRollStartFrame=preRollStartFrame + ) + + if verbose: + log.debug("Extracted Alembic to: %s", file) + + return file diff --git a/client/ayon_core/hosts/maya/api/fbx.py b/client/ayon_core/hosts/maya/api/fbx.py index 939da4011b..fd1bf2c901 100644 --- a/client/ayon_core/hosts/maya/api/fbx.py +++ b/client/ayon_core/hosts/maya/api/fbx.py @@ -47,7 +47,7 @@ class FBXExtractor: "smoothMesh": bool, "instances": bool, # "referencedContainersContent": bool, # deprecated in Maya 2016+ - "bakeComplexAnimation": int, + "bakeComplexAnimation": bool, "bakeComplexStart": int, "bakeComplexEnd": int, "bakeComplexStep": int, @@ -59,6 +59,7 @@ class FBXExtractor: "constraints": bool, "lights": bool, "embeddedTextures": bool, + "includeChildren": bool, "inputConnections": bool, "upAxis": str, # x, y or z, "triangulate": bool, @@ -102,6 +103,7 @@ class FBXExtractor: "constraints": False, "lights": True, "embeddedTextures": False, + "includeChildren": True, "inputConnections": True, "upAxis": "y", "triangulate": False, diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py index 321bcbc0b5..2b41ffc06c 100644 --- a/client/ayon_core/hosts/maya/api/lib.py +++ b/client/ayon_core/hosts/maya/api/lib.py @@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] -# The maya alembic export types -_alembic_options = { - "startFrame": float, - "endFrame": float, - "frameRange": str, # "start end"; overrides startFrame & endFrame - "eulerFilter": bool, - "frameRelativeSample": float, - "noNormals": bool, - "renderableOnly": bool, - "step": float, - "stripNamespaces": bool, - "uvWrite": bool, - "wholeFrameGeo": bool, - "worldSpace": bool, - "writeVisibility": bool, - "writeColorSets": bool, - "writeFaceSets": bool, - "writeCreases": bool, # Maya 2015 Ext1+ - "writeUVSets": bool, # Maya 2017+ - "dataFormat": str, - "root": (list, tuple), - "attr": (list, tuple), - "attrPrefix": (list, tuple), - "userAttr": (list, tuple), - "melPerFrameCallback": str, - "melPostJobCallback": str, - "pythonPerFrameCallback": str, - "pythonPostJobCallback": str, - "selection": bool -} - INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000} FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94} @@ -1330,7 +1299,7 @@ def is_visible(node, override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node)) override_visibility = cmds.getAttr('{}.overrideVisibility'.format( node)) - if override_enabled and override_visibility: + if override_enabled and not override_visibility: return False if parentHidden: @@ -1346,178 +1315,6 @@ def is_visible(node, return True - -def extract_alembic(file, - startFrame=None, - endFrame=None, - selection=True, - uvWrite=True, - eulerFilter=True, - dataFormat="ogawa", - verbose=False, - **kwargs): - """Extract a single Alembic Cache. - - This extracts an Alembic cache using the `-selection` flag to minimize - the extracted content to solely what was Collected into the instance. - - Arguments: - - startFrame (float): Start frame of output. Ignored if `frameRange` - provided. - - endFrame (float): End frame of output. Ignored if `frameRange` - provided. - - frameRange (tuple or str): Two-tuple with start and end frame or a - string formatted as: "startFrame endFrame". This argument - overrides `startFrame` and `endFrame` arguments. 
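The one-character `is_visible` fix above is worth spelling out: a display override only hides a node when the override is enabled and its override visibility is off; the old check reported "hidden" for enabled-but-visible overrides. A minimal sketch of the corrected test, assuming the standard Maya override attributes:

```python
from maya import cmds


def hidden_by_display_override(node):
    """Return True if an enabled display override hides this node."""
    if not cmds.attributeQuery("overrideEnabled", node=node, exists=True):
        return False
    enabled = cmds.getAttr("{}.overrideEnabled".format(node))
    visible = cmds.getAttr("{}.overrideVisibility".format(node))
    # Enabled *and visible* must not count as hidden.
    return bool(enabled) and not bool(visible)
```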
- - dataFormat (str): The data format to use for the cache, - defaults to "ogawa" - - verbose (bool): When on, outputs frame number information to the - Script Editor or output window during extraction. - - noNormals (bool): When on, normal data from the original polygon - objects is not included in the exported Alembic cache file. - - renderableOnly (bool): When on, any non-renderable nodes or hierarchy, - such as hidden objects, are not included in the Alembic file. - Defaults to False. - - stripNamespaces (bool): When on, any namespaces associated with the - exported objects are removed from the Alembic file. For example, an - object with the namespace taco:foo:bar appears as bar in the - Alembic file. - - uvWrite (bool): When on, UV data from polygon meshes and subdivision - objects are written to the Alembic file. Only the current UV map is - included. - - worldSpace (bool): When on, the top node in the node hierarchy is - stored as world space. By default, these nodes are stored as local - space. Defaults to False. - - eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with - an Euler filter. Euler filtering helps resolve irregularities in - rotations especially if X, Y, and Z rotations exceed 360 degrees. - Defaults to True. - - """ - - # Ensure alembic exporter is loaded - cmds.loadPlugin('AbcExport', quiet=True) - - # Alembic Exporter requires forward slashes - file = file.replace('\\', '/') - - # Pass the start and end frame on as `frameRange` so that it - # never conflicts with that argument - if "frameRange" not in kwargs: - # Fallback to maya timeline if no start or end frame provided. - if startFrame is None: - startFrame = cmds.playbackOptions(query=True, - animationStartTime=True) - if endFrame is None: - endFrame = cmds.playbackOptions(query=True, - animationEndTime=True) - - # Ensure valid types are converted to frame range - assert isinstance(startFrame, _alembic_options["startFrame"]) - assert isinstance(endFrame, _alembic_options["endFrame"]) - kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame) - else: - # Allow conversion from tuple for `frameRange` - frame_range = kwargs["frameRange"] - if isinstance(frame_range, (list, tuple)): - assert len(frame_range) == 2 - kwargs["frameRange"] = "{0} {1}".format(frame_range[0], - frame_range[1]) - - # Assemble options - options = { - "selection": selection, - "uvWrite": uvWrite, - "eulerFilter": eulerFilter, - "dataFormat": dataFormat - } - options.update(kwargs) - - # Validate options - for key, value in options.copy().items(): - - # Discard unknown options - if key not in _alembic_options: - log.warning("extract_alembic() does not support option '%s'. 
" - "Flag will be ignored..", key) - options.pop(key) - continue - - # Validate value type - valid_types = _alembic_options[key] - if not isinstance(value, valid_types): - raise TypeError("Alembic option unsupported type: " - "{0} (expected {1})".format(value, valid_types)) - - # Ignore empty values, like an empty string, since they mess up how - # job arguments are built - if isinstance(value, (list, tuple)): - value = [x for x in value if x.strip()] - - # Ignore option completely if no values remaining - if not value: - options.pop(key) - continue - - options[key] = value - - # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ - maya_version = int(cmds.about(version=True)) - if maya_version >= 2018: - options['autoSubd'] = options.pop('writeCreases', False) - - # Format the job string from options - job_args = list() - for key, value in options.items(): - if isinstance(value, (list, tuple)): - for entry in value: - job_args.append("-{} {}".format(key, entry)) - elif isinstance(value, bool): - # Add only when state is set to True - if value: - job_args.append("-{0}".format(key)) - else: - job_args.append("-{0} {1}".format(key, value)) - - job_str = " ".join(job_args) - job_str += ' -file "%s"' % file - - # Ensure output directory exists - parent_dir = os.path.dirname(file) - if not os.path.exists(parent_dir): - os.makedirs(parent_dir) - - if verbose: - log.debug("Preparing Alembic export with options: %s", - json.dumps(options, indent=4)) - log.debug("Extracting Alembic with job arguments: %s", job_str) - - # Perform extraction - print("Alembic Job Arguments : {}".format(job_str)) - - # Disable the parallel evaluation temporarily to ensure no buggy - # exports are made. (PLN-31) - # TODO: Make sure this actually fixes the issues - with evaluation("off"): - cmds.AbcExport(j=job_str, verbose=verbose) - - if verbose: - log.debug("Extracted Alembic to: %s", file) - - return file - - # region ID def get_id_required_nodes(referenced_nodes=False, nodes=None, @@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True): """ fps_mapping = { + '2': '2fps', + '3': '3fps', + '4': '4fps', + '5': '5fps', + '6': '6fps', + '8': '8fps', + '10': '10fps', + '12': '12fps', '15': 'game', + '16': '16fps', '24': 'film', '25': 'pal', '30': 'ntsc', @@ -2612,21 +2418,24 @@ def get_fps_for_current_context(): Returns: Union[int, float]: FPS value. 
""" - - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"attrib.fps"} - ) or {} - fps = folder_entity.get("attrib", {}).get("fps") + task_entity = get_current_task_entity(fields={"attrib"}) + fps = task_entity.get("attrib", {}).get("fps") if not fps: - project_entity = ayon_api.get_project( - project_name, fields=["attrib.fps"] + project_name = get_current_project_name() + folder_path = get_current_folder_path() + folder_entity = ayon_api.get_folder_by_path( + project_name, folder_path, fields={"attrib.fps"} ) or {} - fps = project_entity.get("attrib", {}).get("fps") + fps = folder_entity.get("attrib", {}).get("fps") if not fps: - fps = 25 + project_entity = ayon_api.get_project( + project_name, fields=["attrib.fps"] + ) or {} + fps = project_entity.get("attrib", {}).get("fps") + + if not fps: + fps = 25 return convert_to_maya_fps(fps) @@ -4403,3 +4212,23 @@ def create_rig_animation_instance( variant=namespace, pre_create_data={"use_selection": True} ) + + +def get_node_index_under_parent(node: str) -> int: + """Return the index of a DAG node under its parent. + + Arguments: + node (str): A DAG Node path. + + Returns: + int: The DAG node's index under its parents or world + + """ + node = cmds.ls(node, long=True)[0] # enforce long names + parent = node.rsplit("|", 1)[0] + if not parent: + return cmds.ls(assemblies=True, long=True).index(node) + else: + return cmds.listRelatives(parent, + children=True, + fullPath=True).index(node) diff --git a/client/ayon_core/hosts/maya/api/pipeline.py b/client/ayon_core/hosts/maya/api/pipeline.py index 864a0c1599..74d73e5f95 100644 --- a/client/ayon_core/hosts/maya/api/pipeline.py +++ b/client/ayon_core/hosts/maya/api/pipeline.py @@ -30,9 +30,11 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_inventory_action_path, register_creator_plugin_path, + register_workfile_build_plugin_path, deregister_loader_plugin_path, deregister_inventory_action_path, deregister_creator_plugin_path, + deregister_workfile_build_plugin_path, AYON_CONTAINER_ID, AVALON_CONTAINER_ID, ) @@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR from ayon_core.hosts.maya.lib import create_workspace_mel from . import menu, lib -from .workfile_template_builder import MayaPlaceholderLoadPlugin from .workio import ( open_file, save_file, @@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) - self.log.info(PUBLISH_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) self.log.info("Installing callbacks ... 
") register_event_callback("init", on_init) @@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): def get_containers(self): return ls() - def get_workfile_build_placeholder_plugins(self): - return [ - MayaPlaceholderLoadPlugin - ] - @contextlib.contextmanager def maintained_selection(self): with lib.maintained_selection(): @@ -338,6 +335,7 @@ def uninstall(): deregister_loader_plugin_path(LOAD_PATH) deregister_creator_plugin_path(CREATE_PATH) deregister_inventory_action_path(INVENTORY_PATH) + deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH) menu.uninstall() diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py index ddf19125e3..f4f9a34983 100644 --- a/client/ayon_core/hosts/maya/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py @@ -12,14 +12,13 @@ from ayon_core.pipeline.workfile.workfile_template_builder import ( TemplateAlreadyImported, AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - PlaceholderLoadMixin, + PlaceholderItem, ) from ayon_core.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) -from .lib import read, imprint, get_reference_node, get_main_window +from .lib import read, imprint, get_main_window PLACEHOLDER_SET = "PLACEHOLDERS_SET" @@ -91,170 +90,102 @@ class MayaTemplateBuilder(AbstractTemplateBuilder): return True -class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): - identifier = "maya.load" - label = "Maya load" +class MayaPlaceholderPlugin(PlaceholderPlugin): + """Base Placeholder Plugin for Maya with one unified cache. - def _collect_scene_placeholders(self): - # Cache placeholder data to shared data - placeholder_nodes = self.builder.get_shared_populate_data( - "placeholder_nodes" - ) - if placeholder_nodes is None: - attributes = cmds.ls("*.plugin_identifier", long=True) - placeholder_nodes = {} - for attribute in attributes: - node_name = attribute.rpartition(".")[0] - placeholder_nodes[node_name] = ( - self._parse_placeholder_node_data(node_name) - ) + Creates a locator as placeholder node, which during populate provide + all of its attributes defined on the locator's transform in + `placeholder.data` and where `placeholder.scene_identifier` is the + full path to the node. 
- self.builder.set_shared_populate_data( - "placeholder_nodes", placeholder_nodes - ) - return placeholder_nodes + Inherited classes must still implement `populate_placeholder` - def _parse_placeholder_node_data(self, node_name): - placeholder_data = read(node_name) - parent_name = ( - cmds.getAttr(node_name + ".parent", asString=True) - or node_name.rpartition("|")[0] - or "" - ) - if parent_name: - siblings = cmds.listRelatives(parent_name, children=True) - else: - siblings = cmds.ls(assemblies=True) - node_shortname = node_name.rpartition("|")[2] - current_index = cmds.getAttr(node_name + ".index", asString=True) - if current_index < 0: - current_index = siblings.index(node_shortname) + """ - placeholder_data.update({ - "parent": parent_name, - "index": current_index - }) - return placeholder_data + use_selection_as_parent = True + item_class = PlaceholderItem def _create_placeholder_name(self, placeholder_data): - placeholder_name_parts = placeholder_data["builder_type"].split("_") + return self.identifier.replace(".", "_") - pos = 1 - placeholder_product_type = placeholder_data.get("product_type") - if placeholder_product_type is None: - placeholder_product_type = placeholder_data.get("family") - - if placeholder_product_type: - placeholder_name_parts.insert(pos, placeholder_product_type) - pos += 1 - - # add loader arguments if any - loader_args = placeholder_data["loader_args"] - if loader_args: - loader_args = json.loads(loader_args.replace('\'', '\"')) - values = [v for v in loader_args.values()] - for value in values: - placeholder_name_parts.insert(pos, value) - pos += 1 - - placeholder_name = "_".join(placeholder_name_parts) - - return placeholder_name.capitalize() - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" + def _collect_scene_placeholders(self): + nodes_by_identifier = self.builder.get_shared_populate_data( + "placeholder_nodes" ) - if loaded_representation_ids is None: - try: - containers = cmds.sets("AVALON_CONTAINERS", q=True) - except ValueError: - containers = [] + if nodes_by_identifier is None: + # Cache placeholder data to shared data + nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True) - loaded_representation_ids = { - cmds.getAttr(container + ".representation") - for container in containers - } + nodes_by_identifier = {} + for node in nodes: + identifier = cmds.getAttr("{}.plugin_identifier".format(node)) + nodes_by_identifier.setdefault(identifier, []).append(node) + + # Set the cache self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids + "placeholder_nodes", nodes_by_identifier ) - return loaded_representation_ids + + return nodes_by_identifier def create_placeholder(self, placeholder_data): - selection = cmds.ls(selection=True) - if len(selection) > 1: - raise ValueError("More then one item are selected") - parent = selection[0] if selection else None + parent = None + if self.use_selection_as_parent: + selection = cmds.ls(selection=True) + if len(selection) > 1: + raise ValueError( + "More than one node is selected. " + "Please select only one to define the parent." 
+ ) + parent = selection[0] if selection else None placeholder_data["plugin_identifier"] = self.identifier - placeholder_name = self._create_placeholder_name(placeholder_data) placeholder = cmds.spaceLocator(name=placeholder_name)[0] if parent: placeholder = cmds.parent(placeholder, selection[0])[0] - imprint(placeholder, placeholder_data) - - # Add helper attributes to keep placeholder info - cmds.addAttr( - placeholder, - longName="parent", - hidden=True, - dataType="string" - ) - cmds.addAttr( - placeholder, - longName="index", - hidden=True, - attributeType="short", - defaultValue=-1 - ) - - cmds.setAttr(placeholder + ".parent", "", type="string") + self.imprint(placeholder, placeholder_data) def update_placeholder(self, placeholder_item, placeholder_data): node_name = placeholder_item.scene_identifier - new_values = {} + + changed_values = {} for key, value in placeholder_data.items(): - placeholder_value = placeholder_item.data.get(key) - if value != placeholder_value: - new_values[key] = value - placeholder_item.data[key] = value + if value != placeholder_item.data.get(key): + changed_values[key] = value - for key in new_values.keys(): - cmds.deleteAttr(node_name + "." + key) + # Delete attributes to ensure we imprint new data with correct type + for key in changed_values.keys(): + placeholder_item.data[key] = value + if cmds.attributeQuery(key, node=node_name, exists=True): + attribute = "{}.{}".format(node_name, key) + cmds.deleteAttr(attribute) - imprint(node_name, new_values) + self.imprint(node_name, changed_values) def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, placeholder_data in scene_placeholders.items(): - if placeholder_data.get("plugin_identifier") != self.identifier: - continue - + placeholders = [] + nodes_by_identifier = self._collect_scene_placeholders() + for node in nodes_by_identifier.get(self.identifier, []): # TODO do data validations and maybe upgrades if they are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) + placeholder_data = self.read(node) + placeholders.append( + self.item_class(scene_identifier=node, + data=placeholder_data, + plugin=self) ) - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) + return placeholders def post_placeholder_process(self, placeholder, failed): """Cleanup placeholder after load of its corresponding representations. + Hide placeholder, add them to placeholder set. + Used only by PlaceholderCreateMixin and PlaceholderLoadMixin + Args: placeholder (PlaceholderItem): Item which was just used to load representation. @@ -263,82 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): # Hide placeholder and add them to placeholder set node = placeholder.scene_identifier + # If we just populate the placeholders from current scene, the + # placeholder set will not be created so account for that. 
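The update path above deletes a changed attribute before re-imprinting because Maya cannot change an existing attribute's type in place. A compact sketch of that delete-then-imprint pattern (note that each key must carry its own value when the cached item data is synced):

```python
from maya import cmds


def reimprint(node, item_data, new_data, imprint):
    """Re-imprint only the keys whose values actually changed."""
    changed = {
        key: value for key, value in new_data.items()
        if item_data.get(key) != value
    }
    for key, value in changed.items():
        item_data[key] = value  # keep the cached placeholder data in sync
        # Delete first so the attribute can be recreated with a new type.
        if cmds.attributeQuery(key, node=node, exists=True):
            cmds.deleteAttr("{}.{}".format(node, key))
    imprint(node, changed)
```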
    def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, placeholder_data in scene_placeholders.items():
-            if placeholder_data.get("plugin_identifier") != self.identifier:
-                continue
-
+        placeholders = []
+        nodes_by_identifier = self._collect_scene_placeholders()
+        for node in nodes_by_identifier.get(self.identifier, []):
            # TODO do data validations and maybe upgrades if they are invalid
-            output.append(
-                LoadPlaceholderItem(node_name, placeholder_data, self)
+            placeholder_data = self.read(node)
+            placeholders.append(
+                self.item_class(scene_identifier=node,
+                                data=placeholder_data,
+                                plugin=self)
            )
-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_load_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        repre_ids = self._get_loaded_repre_ids()
-        self.populate_load_placeholder(placeholder, repre_ids)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
+        return placeholders

    def post_placeholder_process(self, placeholder, failed):
        """Cleanup placeholder after load of its corresponding representations.

+        Hide the placeholder and add it to the placeholder set.
+
+        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
+
        Args:
            placeholder (PlaceholderItem): Item which was just used to load
                representation.
@@ -263,82 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):

        # Hide placeholder and add them to placeholder set
        node = placeholder.scene_identifier
+        # If we just populated the placeholders from the current scene, the
+        # placeholder set will not have been created yet, so account for that.
+        if not cmds.objExists(PLACEHOLDER_SET):
+            cmds.sets(name=PLACEHOLDER_SET, empty=True)
+
        cmds.sets(node, addElement=PLACEHOLDER_SET)
        cmds.hide(node)
-        cmds.setAttr(node + ".hiddenInOutliner", True)
+        cmds.setAttr("{}.hiddenInOutliner".format(node), True)

    def delete_placeholder(self, placeholder):
-        """Remove placeholder if building was successful"""
-        cmds.delete(placeholder.scene_identifier)
+        """Remove placeholder if building was successful

-    def load_succeed(self, placeholder, container):
-        self._parent_in_hierarchy(placeholder, container)
-
-    def _parent_in_hierarchy(self, placeholder, container):
-        """Parent loaded container to placeholder's parent.
-
-        ie : Set loaded content as placeholder's sibling
-
-        Args:
-            container (str): Placeholder loaded containers
+        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
        """
+        node = placeholder.scene_identifier

-        if not container:
-            return
+        # Deleting a placeholder node could have Maya also delete any
+        # objectSets the node was a member of, so first remove the node
+        # from any sets it belongs to. This way the `PLACEHOLDERS_SET`
+        # survives the deletion.
+        sets = cmds.listSets(o=node) or []
+        for object_set in sets:
+            cmds.sets(node, remove=object_set)

-        roots = cmds.sets(container, q=True) or []
-        ref_node = None
-        try:
-            ref_node = get_reference_node(roots)
-        except AssertionError as e:
-            self.log.info(e.args[0])
+        cmds.delete(node)

-        nodes_to_parent = []
-        for root in roots:
-            if ref_node:
-                ref_root = cmds.referenceQuery(root, nodes=True)[0]
-                ref_root = (
-                    cmds.listRelatives(ref_root, parent=True, path=True) or
-                    [ref_root]
-                )
-                nodes_to_parent.extend(ref_root)
-                continue
-            if root.endswith("_RN"):
-                # Backwards compatibility for hardcoded reference names.
-                refRoot = cmds.referenceQuery(root, n=True)[0]
-                refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
-                nodes_to_parent.extend(refRoot)
-            elif root not in cmds.listSets(allSets=True):
-                nodes_to_parent.append(root)
+    def imprint(self, node, data):
+        """Imprint call for placeholder node"""

-            elif not cmds.sets(root, q=True):
-                return
+        # Complicated data that can't be represented as flat Maya attributes
+        # is written to JSON strings, e.g. a multiselection EnumDef
+        for key, value in data.items():
+            if isinstance(value, (list, tuple, dict)):
+                data[key] = "JSON::{}".format(json.dumps(value))

-        # Move loaded nodes to correct index in outliner hierarchy
-        placeholder_form = cmds.xform(
-            placeholder.scene_identifier,
-            q=True,
-            matrix=True,
-            worldSpace=True
-        )
-        scene_parent = cmds.listRelatives(
-            placeholder.scene_identifier, parent=True, fullPath=True
-        )
-        for node in set(nodes_to_parent):
-            cmds.reorder(node, front=True)
-            cmds.reorder(node, relative=placeholder.data["index"])
-            cmds.xform(node, matrix=placeholder_form, ws=True)
-            if scene_parent:
-                cmds.parent(node, scene_parent)
-            else:
-                if cmds.listRelatives(node, parent=True):
-                    cmds.parent(node, world=True)
+        imprint(node, data)

-        holding_sets = cmds.listSets(object=placeholder.scene_identifier)
-        if not holding_sets:
-            return
-        for holding_set in holding_sets:
-            cmds.sets(roots, forceElement=holding_set)
+    def read(self, node):
+        """Read call for placeholder node"""
+
+        data = read(node)
+
+        # Complicated data that can't be represented as flat Maya attributes
+        # is read back from JSON strings, e.g. a multiselection EnumDef
+        for key, value in data.items():
+            if isinstance(value, str) and value.startswith("JSON::"):
+                value = value[len("JSON::"):]  # strip off the "JSON::" prefix
+                data[key] = json.loads(value)
+
+        return data


def build_workfile_template(*args):
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation.py b/client/ayon_core/hosts/maya/plugins/create/create_animation.py
deleted file mode 100644
index f30d9aba81..0000000000
--- a/client/ayon_core/hosts/maya/plugins/create/create_animation.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from ayon_core.hosts.maya.api import (
-    lib,
-    plugin
-)
-from ayon_core.lib import (
-    BoolDef,
-    TextDef
-)
-
-
-class CreateAnimation(plugin.MayaHiddenCreator):
-    """Animation output for character rigs
-
-    We hide the animation creator from the UI since the creation of it is
-    automated upon loading a rig. There's an inventory action to recreate it
-    for loaded rigs if by chance someone deleted the animation instance.
-    """
-    identifier = "io.openpype.creators.maya.animation"
-    name = "animationDefault"
-    label = "Animation"
-    product_type = "animation"
-    icon = "male"
-
-    write_color_sets = False
-    write_face_sets = False
-    include_parent_hierarchy = False
-    include_user_defined_attributes = False
-
-    def get_instance_attr_defs(self):
-
-        defs = lib.collect_animation_defs()
-
-        defs.extend([
-            BoolDef("writeColorSets",
-                    label="Write vertex colors",
-                    tooltip="Write vertex colors with the geometry",
-                    default=self.write_color_sets),
-            BoolDef("writeFaceSets",
-                    label="Write face sets",
-                    tooltip="Write face sets with the geometry",
-                    default=self.write_face_sets),
-            BoolDef("writeNormals",
-                    label="Write normals",
-                    tooltip="Write normals with the deforming geometry",
-                    default=True),
-            BoolDef("renderableOnly",
-                    label="Renderable Only",
-                    tooltip="Only export renderable visible shapes",
-                    default=False),
-            BoolDef("visibleOnly",
-                    label="Visible Only",
-                    tooltip="Only export dag objects visible during "
-                            "frame range",
-                    default=False),
-            BoolDef("includeParentHierarchy",
-                    label="Include Parent Hierarchy",
-                    tooltip="Whether to include parent hierarchy of nodes in "
-                            "the publish instance",
-                    default=self.include_parent_hierarchy),
-            BoolDef("worldSpace",
-                    label="World-Space Export",
-                    default=True),
-            BoolDef("includeUserDefinedAttributes",
-                    label="Include User Defined Attributes",
-                    default=self.include_user_defined_attributes),
-            TextDef("attr",
-                    label="Custom Attributes",
-                    default="",
-                    placeholder="attr1, attr2"),
-            TextDef("attrPrefix",
-                    label="Custom Attributes Prefix",
-                    placeholder="prefix1, prefix2")
-        ])
-
-        # TODO: Implement these on a Deadline plug-in instead?
-        """
-        # Default to not send to farm.
-        self.data["farm"] = False
-        self.data["priority"] = 50
-        """
-
-        return defs
-
-    def apply_settings(self, project_settings):
-        super(CreateAnimation, self).apply_settings(project_settings)
-        # Hardcoding creator to be enabled due to existing settings would
-        # disable the creator causing the creator plugin to not be
-        # discoverable.
-        self.enabled = True
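# Illustrative sketch of the "JSON::" round-trip that the `imprint` and
# `read` overrides above implement: values that cannot live in flat Maya
# attributes (lists, tuples, dicts, e.g. a multiselection EnumDef) are
# serialized to prefixed JSON strings on write and restored on read. This is
# a standalone approximation of that logic, not the plugin methods:
import json

PREFIX = "JSON::"


def encode(data):
    # Serialize non-flat values into "JSON::"-prefixed strings
    return {
        key: PREFIX + json.dumps(value)
        if isinstance(value, (list, tuple, dict)) else value
        for key, value in data.items()
    }


def decode(data):
    # Restore "JSON::"-prefixed strings back into Python objects
    return {
        key: json.loads(value[len(PREFIX):])
        if isinstance(value, str) and value.startswith(PREFIX) else value
        for key, value in data.items()
    }


roundtrip = decode(encode({"products": ["model", "rig"], "count": 2}))
assert roundtrip == {"products": ["model", "rig"], "count": 2}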
diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
new file mode 100644
index 0000000000..069762e4ae
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py
@@ -0,0 +1,138 @@
+from maya import cmds
+
+from ayon_core.hosts.maya.api import lib, plugin
+
+from ayon_core.lib import (
+    BoolDef,
+    NumberDef,
+)
+
+
+def _get_animation_attr_defs(cls):
+    """Get Animation generic definitions."""
+    defs = lib.collect_animation_defs()
+    defs.extend(
+        [
+            BoolDef("farm", label="Submit to Farm"),
+            NumberDef("priority", label="Farm job Priority", default=50),
+            BoolDef("refresh", label="Refresh viewport during export"),
+            BoolDef(
+                "includeParentHierarchy",
+                label="Include Parent Hierarchy",
+                tooltip=(
+                    "Whether to include parent hierarchy of nodes in the "
+                    "publish instance."
+                )
+            ),
+            BoolDef(
+                "includeUserDefinedAttributes",
+                label="Include User Defined Attributes",
+                tooltip=(
+                    "Whether to include all custom maya attributes found "
+                    "on nodes as attributes in the Alembic data."
+                )
+            ),
+        ]
+    )
+
+    return defs
+
+
+def convert_legacy_alembic_creator_attributes(node_data, class_name):
+    """This is a legacy transfer of creator attributes to publish attributes
+    for ExtractAlembic/ExtractAnimation plugin.
+    """
+    publish_attributes = node_data["publish_attributes"]
+
+    if class_name in publish_attributes:
+        return node_data
+
+    attributes = [
+        "attr",
+        "attrPrefix",
+        "visibleOnly",
+        "writeColorSets",
+        "writeFaceSets",
+        "writeNormals",
+        "renderableOnly",
+        "worldSpace"
+    ]
+    plugin_attributes = {}
+    for attr in attributes:
+        if attr not in node_data["creator_attributes"]:
+            continue
+        value = node_data["creator_attributes"].pop(attr)
+
+        plugin_attributes[attr] = value
+
+    publish_attributes[class_name] = plugin_attributes
+
+    return node_data
+
+
+class CreateAnimation(plugin.MayaHiddenCreator):
+    """Animation output for character rigs
+
+    We hide the animation creator from the UI since the creation of it is
+    automated upon loading a rig. There's an inventory action to recreate it
+    for loaded rigs if by chance someone deleted the animation instance.
+ """ + + identifier = "io.openpype.creators.maya.animation" + name = "animationDefault" + label = "Animation" + product_type = "animation" + icon = "male" + + write_color_sets = False + write_face_sets = False + include_parent_hierarchy = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreateAnimation, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAnimation" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreateAnimation, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + +class CreatePointCache(plugin.MayaCreator): + """Alembic pointcache for animated data""" + + identifier = "io.openpype.creators.maya.pointcache" + label = "Pointcache" + product_type = "pointcache" + icon = "gears" + write_color_sets = False + write_face_sets = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreatePointCache, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAlembic" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreatePointCache, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + def create(self, product_name, instance_data, pre_create_data): + instance = super(CreatePointCache, self).create( + product_name, instance_data, pre_create_data + ) + instance_node = instance.get("instance_node") + + # For Arnold standin proxy + proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) + cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py index dc0ffb02c1..e321c13ca0 100644 --- a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -1,3 +1,5 @@ +from maya import cmds + from ayon_core.hosts.maya.api import ( lib, plugin @@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator): return defs + +class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource): + """Arnold Scene Source Proxy + + This product type facilitates working with proxy geometry in the viewport. 
+ """ + + identifier = "io.openpype.creators.maya.assproxy" + label = "Arnold Scene Source Proxy" + product_type = "assProxy" + icon = "cube" + def create(self, product_name, instance_data, pre_create_data): - - from maya import cmds - instance = super(CreateArnoldSceneSource, self).create( product_name, instance_data, pre_create_data ) instance_node = instance.get("instance_node") - content = cmds.sets(name=instance_node + "_content_SET", empty=True) proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets([content, proxy], forceElement=instance_node) + cmds.sets([proxy], forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py deleted file mode 100644 index 05e3a1a29f..0000000000 --- a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py +++ /dev/null @@ -1,88 +0,0 @@ -from maya import cmds - -from ayon_core.hosts.maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreatePointCache(plugin.MayaCreator): - """Alembic pointcache for animated data""" - - identifier = "io.openpype.creators.maya.pointcache" - label = "Pointcache" - product_type = "pointcache" - icon = "gears" - write_color_sets = False - write_face_sets = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=False), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=False), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("refresh", - label="Refresh viewport during export", - default=False), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - default="", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. 
- self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def create(self, product_name, instance_data, pre_create_data): - - instance = super(CreatePointCache, self).create( - product_name, instance_data, pre_create_data - ) - instance_node = instance.get("instance_node") - - # For Arnold standin proxy - proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py index 4b7d2f42ab..ae3b68965a 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py @@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import ( unique_namespace, get_attribute_input, maintained_selection, + get_fps_for_current_context ) from ayon_core.hosts.maya.api.pipeline import containerise from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type @@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin): """Load as Arnold standin""" product_types = { - "ass", "animation", "model", "proxyAbc", "pointcache", "usd" + "ass", + "assProxy", + "animation", + "model", + "proxyAbc", + "pointcache", + "usd" } representations = {"ass", "abc", "usda", "usdc", "usd"} @@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin): sequence = is_sequence(os.listdir(os.path.dirname(repre_path))) cmds.setAttr(standin_shape + ".useFrameExtension", sequence) - fps = float(version_attributes.get("fps")) or 25 - cmds.setAttr(standin_shape + ".abcFPS", fps) + fps = ( + version_attributes.get("fps") or get_fps_for_current_context() + ) + cmds.setAttr(standin_shape + ".abcFPS", float(fps)) nodes = [root, standin, standin_shape] if operator is not None: @@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin): proxy_path = "/".join([os.path.dirname(path), proxy_basename]) return proxy_basename, proxy_path + def _update_operators(self, string_replace_operator, proxy_basename, path): + cmds.setAttr( + string_replace_operator + ".match", + proxy_basename.split(".")[0], + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path).split(".")[0], + type="string" + ) + def _setup_proxy(self, shape, path, namespace): proxy_basename, proxy_path = self._get_proxy_path(path) @@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin): "*.(@node=='{}')".format(node_type), type="string" ) - cmds.setAttr( - string_replace_operator + ".match", - proxy_basename, - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path), - type="string" - ) + self._update_operators(string_replace_operator, proxy_basename, path) cmds.connectAttr( string_replace_operator + ".out", @@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin): path = get_representation_path(repre_entity) proxy_basename, proxy_path = self._get_proxy_path(path) - # Whether there is proxy or so, we still update the string operator. + # Whether there is proxy or not, we still update the string operator. # If no proxy exists, the string operator won't replace anything. 
- cmds.setAttr( - string_replace_operator + ".match", - proxy_basename, - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path), - type="string" - ) + self._update_operators(string_replace_operator, proxy_basename, path) dso_path = path if os.path.exists(proxy_path): diff --git a/client/ayon_core/hosts/maya/plugins/load/load_image.py b/client/ayon_core/hosts/maya/plugins/load/load_image.py index 5b0858ce70..171920f747 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_image.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_image.py @@ -8,7 +8,7 @@ from ayon_core.pipeline import ( from ayon_core.pipeline.load.utils import get_representation_path_from_context from ayon_core.pipeline.colorspace import ( get_imageio_file_rules_colorspace_from_filepath, - get_imageio_config, + get_current_context_imageio_config_preset, get_imageio_file_rules ) from ayon_core.settings import get_project_settings @@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin): host_name = get_current_host_name() project_settings = get_project_settings(project_name) - config_data = get_imageio_config( - project_name, host_name, + config_data = get_current_context_imageio_config_preset( project_settings=project_settings ) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py index 2ab6511ece..391c80c84e 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py @@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): if instance.data.get("farm"): instance.data["families"].append("publish.farm") - diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 0db89bee31..fb71e128eb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): # Offset to be after renderable camera collection. 
order = pyblish.api.CollectorOrder + 0.2 label = "Collect Arnold Scene Source" - families = ["ass"] + families = ["ass", "assProxy"] def process(self, instance): - objsets = instance.data["setMembers"] + instance.data["members"] = [] + for set_member in instance.data["setMembers"]: + if cmds.nodeType(set_member) != "objectSet": + instance.data["members"].extend(self.get_hierarchy(set_member)) + continue - for objset in objsets: - objset = str(objset) - members = cmds.sets(objset, query=True) + members = cmds.sets(set_member, query=True) members = cmds.ls(members, long=True) if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) + self.log.warning( + "Skipped empty instance: \"%s\" " % set_member + ) continue - if objset.endswith("content_SET"): - instance.data["contentMembers"] = self.get_hierarchy(members) - if objset.endswith("proxy_SET"): + if set_member.endswith("proxy_SET"): instance.data["proxy"] = self.get_hierarchy(members) # Use camera in object set if present else default to render globals @@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] if renderable: camera = renderable[0] - for node in instance.data["contentMembers"]: + for node in instance.data["members"]: camera_shapes = cmds.listRelatives( node, shapes=True, type="camera" ) @@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): self.log.debug("data: {}".format(instance.data)) def get_hierarchy(self, nodes): - """Return nodes with all their children. - - Arguments: - nodes (List[str]): List of nodes to collect children hierarchy for - - Returns: - list: Input nodes with their children hierarchy - - """ + """Return nodes with all their children""" nodes = cmds.ls(nodes, long=True) if not nodes: return [] - - children = get_all_children(nodes, ignore_intermediate_objects=True) - return list(children.union(nodes)) + children = get_all_children(nodes) + # Make sure nodes merged with children only + # contains unique entries + return list(set(nodes + list(children))) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py index 16fef2e168..3d586d48fb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py @@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin): def process(self, instance): # Collect user defined attributes. 
- if not instance.data.get("includeUserDefinedAttributes", False): + if not instance.data["creator_attributes"].get( + "includeUserDefinedAttributes" + ): return if "out_hierarchy" in instance.data: diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py index ed8f2ad40c..fb4c41f1de 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor): families = ["ass"] asciiAss = False - def process(self, instance): - staging_dir = self.staging_dir(instance) + def _pre_process(self, instance, staging_dir): file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) # Mask @@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor): "mask": mask } - filenames, nodes_by_id = self._extract( - instance.data["contentMembers"], attribute_data, kwargs - ) - if "representations" not in instance.data: instance.data["representations"] = [] + return attribute_data, kwargs + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames = self._extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + def _post_process(self, instance, filenames, staging_dir, frame_start): + nodes_by_id = self._nodes_by_id(instance[:]) representation = { "name": "ass", "ext": "ass", "files": filenames if len(filenames) > 1 else filenames[0], "stagingDir": staging_dir, - "frameStart": kwargs["startFrame"] + "frameStart": frame_start } instance.data["representations"].append(representation) - json_path = os.path.join(staging_dir, "{}.json".format(instance.name)) + json_path = os.path.join( + staging_dir, "{}.json".format(instance.name) + ) with open(json_path, "w") as f: json.dump(nodes_by_id, f) @@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor): "Extracted instance {} to: {}".format(instance.name, staging_dir) ) - # Extract proxy. - if not instance.data.get("proxy", []): - return + def _nodes_by_id(self, nodes): + nodes_by_id = defaultdict(list) - kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + for node in nodes: + id = lib.get_id(node) - filenames, _ = self._extract( + if id is None: + continue + + # Converting Maya hierarchy separator "|" to Arnold separator "/". 
+ nodes_by_id[id].append(node.replace("|", "/")) + + return nodes_by_id + + def _extract(self, nodes, attribute_data, kwargs): + filenames = [] + with lib.attribute_values(attribute_data): + with lib.maintained_selection(): + self.log.debug( + "Writing: {}".format(nodes) + ) + cmds.select(nodes, noExpand=True) + + self.log.debug( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.debug("Exported: {}".format(filenames)) + + return filenames + + +class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source Proxy" + hosts = ["maya"] + families = ["assProxy"] + asciiAss = True + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames, _ = self._duplicate_extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + kwargs["filename"] = os.path.join( + staging_dir, "{}_proxy.ass".format(instance.name) + ) + + filenames, _ = self._duplicate_extract( instance.data["proxy"], attribute_data, kwargs ) @@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor): instance.data["representations"].append(representation) - def _extract(self, nodes, attribute_data, kwargs): + def _duplicate_extract(self, nodes, attribute_data, kwargs): self.log.debug( "Writing {} with:\n{}".format(kwargs["filename"], kwargs) ) filenames = [] - nodes_by_id = defaultdict(list) # Duplicating nodes so they are direct children of the world. This # makes the hierarchy of any exported ass file the same. with lib.delete_after() as delete_bin: @@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor): if not shapes: continue - duplicate_transform = cmds.duplicate(node)[0] + basename = cmds.duplicate(node)[0] + parents = cmds.ls(node, long=True)[0].split("|")[:-1] + duplicate_transform = "|".join(parents + [basename]) if cmds.listRelatives(duplicate_transform, parent=True): duplicate_transform = cmds.parent( @@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor): duplicate_nodes.extend(shapes) delete_bin.append(duplicate_transform) - # Copy cbId to mtoa_constant. - for node in duplicate_nodes: - # Converting Maya hierarchy separator "|" to Arnold - # separator "/". 
- nodes_by_id[lib.get_id(node)].append(node.replace("|", "/")) - - with lib.attribute_values(attribute_data): - with lib.maintained_selection(): - self.log.debug( - "Writing: {}".format(duplicate_nodes) - ) - cmds.select(duplicate_nodes, noExpand=True) - - self.log.debug( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.debug("Exported: {}".format(filenames)) + nodes_by_id = self._nodes_by_id(duplicate_nodes) + filenames = self._extract(duplicate_nodes, attribute_data, kwargs) return filenames, nodes_by_id diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py index 2c23f9b752..5f51dc38cb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py @@ -2,7 +2,7 @@ import os import json from ayon_core.pipeline import publish -from ayon_core.hosts.maya.api.lib import extract_alembic +from ayon_core.hosts.maya.api.alembic import extract_alembic from maya import cmds diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py index ee66ed2fb7..77b5b79b5f 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_fbx_animation.py @@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor): fbx_exporter = fbx.FBXExtractor(log=self.log) out_members = instance.data.get("animated_skeleton", []) # Export - instance.data["constraints"] = True + # TODO: need to set up the options for users to set up + # the flags they intended to export instance.data["skeletonDefinitions"] = True instance.data["referencedAssetsContent"] = True fbx_exporter.set_options_from_instance(instance) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py index 5de72f7674..cc930e49cc 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py @@ -1,17 +1,29 @@ import os +from collections import OrderedDict from maya import cmds from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.hosts.maya.api.lib import ( - extract_alembic, + get_all_children, suspended_refresh, maintained_selection, iter_visible_nodes_in_range ) +from ayon_core.lib import ( + BoolDef, + TextDef, + NumberDef, + EnumDef, + UISeparatorDef, + UILabelDef, +) +from ayon_core.pipeline.publish import AYONPyblishPluginMixin +from ayon_core.pipeline import KnownPublishError -class ExtractAlembic(publish.Extractor): +class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin): """Produce an alembic of just point positions and normals. 
Positions and normals, uvs, creases are preserved, but nothing more, @@ -27,8 +39,35 @@ class ExtractAlembic(publish.Extractor): targets = ["local", "remote"] # From settings + attr = [] + attrPrefix = [] bake_attributes = [] bake_attribute_prefixes = [] + dataFormat = "ogawa" + eulerFilter = False + melPerFrameCallback = "" + melPostJobCallback = "" + overrides = [] + preRoll = False + preRollStartFrame = 0 + pythonPerFrameCallback = "" + pythonPostJobCallback = "" + renderableOnly = False + stripNamespaces = True + uvsOnly = False + uvWrite = False + userAttr = "" + userAttrPrefix = "" + verbose = False + visibleOnly = False + wholeFrameGeo = False + worldSpace = True + writeColorSets = False + writeCreases = False + writeFaceSets = False + writeNormals = True + writeUVSets = False + writeVisibility = False def process(self, instance): if instance.data.get("farm"): @@ -41,16 +80,38 @@ class ExtractAlembic(publish.Extractor): start = float(instance.data.get("frameStartHandle", 1)) end = float(instance.data.get("frameEndHandle", 1)) - attrs = instance.data.get("attr", "").split(";") - attrs = [value for value in attrs if value.strip()] + attribute_values = self.get_attr_values_from_data( + instance.data + ) + + attrs = [ + attr.strip() + for attr in attribute_values.get("attr", "").split(";") + if attr.strip() + ] attrs += instance.data.get("userDefinedAttributes", []) attrs += self.bake_attributes attrs += ["cbId"] - attr_prefixes = instance.data.get("attrPrefix", "").split(";") - attr_prefixes = [value for value in attr_prefixes if value.strip()] + attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("attrPrefix", "").split(";") + if attr.strip() + ] attr_prefixes += self.bake_attribute_prefixes + user_attrs = [ + attr.strip() + for attr in attribute_values.get("userAttr", "").split(";") + if attr.strip() + ] + + user_attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("userAttrPrefix", "").split(";") + if attr.strip() + ] + self.log.debug("Extracting pointcache..") dirname = self.staging_dir(instance) @@ -58,28 +119,82 @@ class ExtractAlembic(publish.Extractor): filename = "{name}.abc".format(**instance.data) path = os.path.join(parent_dir, filename) - options = { - "step": instance.data.get("step", 1.0), - "attr": attrs, - "attrPrefix": attr_prefixes, - "writeVisibility": True, - "writeCreases": True, - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False), - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True) - } - + root = None if not instance.data.get("includeParentHierarchy", True): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = roots + root = roots - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. 
- options["writeUVSets"] = True + kwargs = { + "file": path, + "attr": attrs, + "attrPrefix": attr_prefixes, + "userAttr": user_attrs, + "userAttrPrefix": user_attr_prefixes, + "dataFormat": attribute_values.get("dataFormat", self.dataFormat), + "endFrame": end, + "eulerFilter": attribute_values.get( + "eulerFilter", self.eulerFilter + ), + "preRoll": attribute_values.get("preRoll", self.preRoll), + "preRollStartFrame": attribute_values.get( + "preRollStartFrame", self.preRollStartFrame + ), + "renderableOnly": attribute_values.get( + "renderableOnly", self.renderableOnly + ), + "root": root, + "selection": True, + "startFrame": start, + "step": instance.data.get( + "creator_attributes", {} + ).get("step", 1.0), + "stripNamespaces": attribute_values.get( + "stripNamespaces", self.stripNamespaces + ), + "uvWrite": attribute_values.get("uvWrite", self.uvWrite), + "verbose": attribute_values.get("verbose", self.verbose), + "wholeFrameGeo": attribute_values.get( + "wholeFrameGeo", self.wholeFrameGeo + ), + "worldSpace": attribute_values.get("worldSpace", self.worldSpace), + "writeColorSets": attribute_values.get( + "writeColorSets", self.writeColorSets + ), + "writeCreases": attribute_values.get( + "writeCreases", self.writeCreases + ), + "writeFaceSets": attribute_values.get( + "writeFaceSets", self.writeFaceSets + ), + "writeUVSets": attribute_values.get( + "writeUVSets", self.writeUVSets + ), + "writeVisibility": attribute_values.get( + "writeVisibility", self.writeVisibility + ), + "uvsOnly": attribute_values.get( + "uvsOnly", self.uvsOnly + ), + "melPerFrameCallback": attribute_values.get( + "melPerFrameCallback", self.melPerFrameCallback + ), + "melPostJobCallback": attribute_values.get( + "melPostJobCallback", self.melPostJobCallback + ), + "pythonPerFrameCallback": attribute_values.get( + "pythonPerFrameCallback", self.pythonPostJobCallback + ), + "pythonPostJobCallback": attribute_values.get( + "pythonPostJobCallback", self.pythonPostJobCallback + ), + # Note that this converts `writeNormals` to `noNormals` for the + # `AbcExport` equivalent in `extract_alembic` + "noNormals": not attribute_values.get( + "writeNormals", self.writeNormals + ), + } if instance.data.get("visibleOnly", False): # If we only want to include nodes that are visible in the frame @@ -87,20 +202,19 @@ class ExtractAlembic(publish.Extractor): # flag does not filter out those that are only hidden on some # frames as it counts "animated" or "connected" visibilities as # if it's always visible. 
        if instance.data.get("visibleOnly", False):
            # If we only want to include nodes that are visible in the frame
            # range then we iterate over the nodes, since the `visibleOnly`
            # flag does not filter out those that are only hidden on some
            # frames as it counts "animated" or "connected" visibilities as
            # if it's always visible.
-            nodes = list(iter_visible_nodes_in_range(nodes,
-                                                     start=start,
-                                                     end=end))
+            nodes = list(
+                iter_visible_nodes_in_range(nodes, start=start, end=end)
+            )

        suspend = not instance.data.get("refresh", False)
        with suspended_refresh(suspend=suspend):
            with maintained_selection():
                cmds.select(nodes, noExpand=True)
-                extract_alembic(
-                    file=path,
-                    startFrame=start,
-                    endFrame=end,
-                    **options
+                self.log.debug(
+                    "Running `extract_alembic` with the keyword arguments: "
+                    "{}".format(kwargs)
                )
+                extract_alembic(**kwargs)

        if "representations" not in instance.data:
            instance.data["representations"] = []
@@ -124,22 +238,17 @@ class ExtractAlembic(publish.Extractor):
            return

        path = path.replace(".abc", "_proxy.abc")
+        kwargs["file"] = path

        if not instance.data.get("includeParentHierarchy", True):
            # Set the root nodes if we don't want to include parents
            # The roots are to be considered the ones that are the actual
            # direct members of the set
-            options["root"] = instance.data["proxyRoots"]
+            kwargs["root"] = instance.data["proxyRoots"]

        with suspended_refresh(suspend=suspend):
            with maintained_selection():
                cmds.select(instance.data["proxy"])
-                extract_alembic(
-                    file=path,
-                    startFrame=start,
-                    endFrame=end,
-                    **options
-                )
-
+                extract_alembic(**kwargs)
        representation = {
            "name": "proxy",
            "ext": "abc",
@@ -152,24 +261,265 @@ class ExtractAlembic(publish.Extractor):

    def get_members_and_roots(self, instance):
        return instance[:], instance.data.get("setMembers")

+    @classmethod
+    def get_attribute_defs(cls):
+        if not cls.overrides:
+            return []
+
+        override_defs = OrderedDict({
+            "eulerFilter": BoolDef(
+                "eulerFilter",
+                label="Euler Filter",
+                default=cls.eulerFilter,
+                tooltip="Apply Euler filter while sampling rotations."
+            ),
+            "renderableOnly": BoolDef(
+                "renderableOnly",
+                label="Renderable Only",
+                default=cls.renderableOnly,
+                tooltip="Only export renderable visible shapes."
+            ),
+            "stripNamespaces": BoolDef(
+                "stripNamespaces",
+                label="Strip Namespaces",
+                default=cls.stripNamespaces,
+                tooltip=(
+                    "Namespaces will be stripped off of the node before being "
+                    "written to Alembic."
+                )
+            ),
+            "uvsOnly": BoolDef(
+                "uvsOnly",
+                label="UVs Only",
+                default=cls.uvsOnly,
+                tooltip=(
+                    "If this flag is present, only uv data for PolyMesh and "
+                    "SubD shapes will be written to the Alembic file."
+                )
+            ),
+            "uvWrite": BoolDef(
+                "uvWrite",
+                label="UV Write",
+                default=cls.uvWrite,
+                tooltip=(
+                    "Uv data for PolyMesh and SubD shapes will be written to "
+                    "the Alembic file."
+                )
+            ),
+            "verbose": BoolDef(
+                "verbose",
+                label="Verbose",
+                default=cls.verbose,
+                tooltip="Prints the current frame that is being evaluated."
+            ),
+            "visibleOnly": BoolDef(
+                "visibleOnly",
+                label="Visible Only",
+                default=cls.visibleOnly,
+                tooltip="Only export dag objects visible during frame range."
+            ),
+            "wholeFrameGeo": BoolDef(
+                "wholeFrameGeo",
+                label="Whole Frame Geo",
+                default=cls.wholeFrameGeo,
+                tooltip=(
+                    "Data for geometry will only be written out on whole "
+                    "frames."
+                )
+            ),
+            "worldSpace": BoolDef(
+                "worldSpace",
+                label="World Space",
+                default=cls.worldSpace,
+                tooltip="Any root nodes will be stored in world space."
+            ),
+            "writeColorSets": BoolDef(
+                "writeColorSets",
+                label="Write Color Sets",
+                default=cls.writeColorSets,
+                tooltip="Write vertex colors with the geometry."
+            ),
+            "writeCreases": BoolDef(
+                "writeCreases",
+                label="Write Creases",
+                default=cls.writeCreases,
+                tooltip="Write the geometry's edge and vertex crease "
+                        "information."
+ ), + "writeFaceSets": BoolDef( + "writeFaceSets", + label="Write Face Sets", + default=cls.writeFaceSets, + tooltip="Write face sets with the geometry." + ), + "writeNormals": BoolDef( + "writeNormals", + label="Write Normals", + default=cls.writeNormals, + tooltip="Write normals with the deforming geometry." + ), + "writeUVSets": BoolDef( + "writeUVSets", + label="Write UV Sets", + default=cls.writeUVSets, + tooltip=( + "Write all uv sets on MFnMeshes as vector 2 indexed " + "geometry parameters with face varying scope." + ) + ), + "writeVisibility": BoolDef( + "writeVisibility", + label="Write Visibility", + default=cls.writeVisibility, + tooltip=( + "Visibility state will be stored in the Alembic file. " + "Otherwise everything written out is treated as visible." + ) + ), + "preRoll": BoolDef( + "preRoll", + label="Pre Roll", + default=cls.preRoll, + tooltip="This frame range will not be sampled." + ), + "preRollStartFrame": NumberDef( + "preRollStartFrame", + label="Pre Roll Start Frame", + tooltip=( + "The frame to start scene evaluation at. This is used" + " to set the starting frame for time dependent " + "translations and can be used to evaluate run-up that" + " isn't actually translated." + ), + default=cls.preRollStartFrame + ), + "dataFormat": EnumDef( + "dataFormat", + label="Data Format", + items=["ogawa", "HDF"], + default=cls.dataFormat, + tooltip="The data format to use to write the file." + ), + "attr": TextDef( + "attr", + label="Custom Attributes", + placeholder="attr1; attr2; ...", + default=cls.attr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "attrPrefix": TextDef( + "attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.attrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttr": TextDef( + "userAttr", + label="User Attr", + placeholder="attr1; attr2; ...", + default=cls.userAttr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttrPrefix": TextDef( + "userAttrPrefix", + label="User Attr Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.userAttrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "melPerFrameCallback": TextDef( + "melPerFrameCallback", + label="Mel Per Frame Callback", + default=cls.melPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a Mel command." + ) + ), + "melPostJobCallback": TextDef( + "melPostJobCallback", + label="Mel Post Job Callback", + default=cls.melPostJobCallback, + tooltip=( + "When the translation has finished the string specified " + "is evaluated as a Mel command." + ) + ), + "pythonPerFrameCallback": TextDef( + "pythonPerFrameCallback", + label="Python Per Frame Callback", + default=cls.pythonPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a python command." 
+                )
+            ),
+            "pythonPostJobCallback": TextDef(
+                "pythonPostJobCallback",
+                label="Python Post Job Callback",
+                default=cls.pythonPostJobCallback,
+                tooltip=(
+                    "When the translation has finished the string specified "
+                    "is evaluated as a python command."
+                )
+            )
+        })
+
+        defs = super(ExtractAlembic, cls).get_attribute_defs()
+
+        defs.extend([
+            UISeparatorDef("sep_alembic_options"),
+            UILabelDef("Alembic Options"),
+        ])
+
+        # The Arguments that can be modified by the Publisher
+        overrides = set(cls.overrides)
+        for key, value in override_defs.items():
+            if key not in overrides:
+                continue
+
+            defs.append(value)
+
+        defs.append(
+            UISeparatorDef("sep_alembic_options_end")
+        )
+
+        return defs
+

class ExtractAnimation(ExtractAlembic):
-    label = "Extract Animation"
+    label = "Extract Animation (Alembic)"
    families = ["animation"]

    def get_members_and_roots(self, instance):
-
        # Collect the out set nodes
        out_sets = [node for node in instance if node.endswith("out_SET")]
        if len(out_sets) != 1:
-            raise RuntimeError("Couldn't find exactly one out_SET: "
-                               "{0}".format(out_sets))
+            raise KnownPublishError(
+                "Couldn't find exactly one out_SET: {0}".format(out_sets)
+            )
        out_set = out_sets[0]
-        roots = cmds.sets(out_set, query=True)
+        roots = cmds.sets(out_set, query=True) or []

        # Include all descendants
-        nodes = roots + cmds.listRelatives(roots,
-                                           allDescendents=True,
-                                           fullPath=True) or []
+        nodes = roots.copy()
+        nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))

        return nodes, roots
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
index 3637a58614..5aefdfc33a 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
@@ -3,8 +3,8 @@ import os
 from maya import cmds
 
 from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
-    extract_alembic,
     suspended_refresh,
     maintained_selection,
     iter_visible_nodes_in_range
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
index 1a389f3d33..b5cc7745a1 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
@@ -5,8 +5,8 @@ import os
 from maya import cmds  # noqa
 
 from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
-    extract_alembic,
     suspended_refresh,
     maintained_selection
 )
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
index d799486184..54d295b479 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
@@ -5,7 +5,7 @@ import copy
 from maya import cmds
 import pyblish.api
 
-from ayon_core.hosts.maya.api.lib import extract_alembic
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.pipeline import publish
 
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
new file mode 100644
index
0000000000..11f4c313fa --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py @@ -0,0 +1,130 @@ +import inspect +import pyblish.api + +from ayon_core.pipeline import OptionalPyblishPluginMixin +from ayon_core.pipeline.publish import RepairAction, PublishValidationError + + +class ValidateAlembicDefaultsPointcache( + pyblish.api.InstancePlugin, OptionalPyblishPluginMixin +): + """Validate the attributes on the instance are defaults. + + The defaults are defined in the project settings. + """ + + order = pyblish.api.ValidatorOrder + families = ["pointcache"] + hosts = ["maya"] + label = "Validate Alembic Options Defaults" + actions = [RepairAction] + optional = True + + plugin_name = "ExtractAlembic" + + @classmethod + def _get_settings(cls, context): + maya_settings = context.data["project_settings"]["maya"] + settings = maya_settings["publish"]["ExtractAlembic"] + return settings + + @classmethod + def _get_publish_attributes(cls, instance): + return instance.data["publish_attributes"][cls.plugin_name] + + def process(self, instance): + if not self.is_active(instance.data): + return + + settings = self._get_settings(instance.context) + attributes = self._get_publish_attributes(instance) + + invalid = {} + for key, value in attributes.items(): + if key not in settings: + # This may occur if attributes have changed over time and an + # existing instance has older legacy attributes that do not + # match the current settings definition. + self.log.warning( + "Publish attribute %s not found in Alembic Export " + "default settings. Ignoring validation for attribute.", + key + ) + continue + + default_value = settings[key] + + # Lists are best to compared sorted since we cant rely on the order + # of the items. + if isinstance(value, list): + value = sorted(value) + default_value = sorted(default_value) + + if value != default_value: + invalid[key] = value, default_value + + if invalid: + non_defaults = "\n".join( + f"- {key}: {value} \t(default: {default_value})" + for key, (value, default_value) in invalid.items() + ) + + raise PublishValidationError( + "Alembic extract options differ from default values:\n" + f"{non_defaults}", + description=self.get_description() + ) + + @staticmethod + def get_description(): + return inspect.cleandoc( + """### Alembic Extract settings differ from defaults + + The alembic export options differ from the project default values. + + If this is intentional you can disable this validation by + disabling **Validate Alembic Options Default**. + + If not you may use the "Repair" action to revert all the options to + their default values. + + """ + ) + + @classmethod + def repair(cls, instance): + # Find create instance twin. + create_context = instance.context.data["create_context"] + create_instance = create_context.get_instance_by_id( + instance.data["instance_id"] + ) + + # Set the settings values on the create context then save to workfile. + settings = cls._get_settings(instance.context) + attributes = cls._get_publish_attributes(create_instance) + for key in attributes: + if key not in settings: + # This may occur if attributes have changed over time and an + # existing instance has older legacy attributes that do not + # match the current settings definition. + cls.log.warning( + "Publish attribute %s not found in Alembic Export " + "default settings. 
Ignoring repair for attribute.", + key + ) + continue + attributes[key] = settings[key] + + create_context.save_changes() + + +class ValidateAlembicDefaultsAnimation( + ValidateAlembicDefaultsPointcache +): + """Validate the attributes on the instance are defaults. + + The defaults are defined in the project settings. + """ + label = "Validate Alembic Options Defaults" + families = ["animation"] + plugin_name = "ExtractAnimation" diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py b/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py deleted file mode 100644 index 2ba2bff6fc..0000000000 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_animated_reference.py +++ /dev/null @@ -1,71 +0,0 @@ -import pyblish.api -import ayon_core.hosts.maya.api.action -from ayon_core.pipeline.publish import ( - PublishValidationError, - ValidateContentsOrder, - OptionalPyblishPluginMixin -) -from maya import cmds - - -class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin, - OptionalPyblishPluginMixin): - """Validate all nodes in skeletonAnim_SET are referenced""" - - order = ValidateContentsOrder - hosts = ["maya"] - families = ["animation.fbx"] - label = "Animated Reference Rig" - accepted_controllers = ["transform", "locator"] - actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction] - optional = False - - def process(self, instance): - if not self.is_active(instance.data): - return - animated_sets = instance.data.get("animated_skeleton", []) - if not animated_sets: - self.log.debug( - "No nodes found in skeletonAnim_SET. " - "Skipping validation of animated reference rig..." - ) - return - - for animated_reference in animated_sets: - is_referenced = cmds.referenceQuery( - animated_reference, isNodeReferenced=True) - if not bool(is_referenced): - raise PublishValidationError( - "All the content in skeletonAnim_SET" - " should be referenced nodes" - ) - invalid_controls = self.validate_controls(animated_sets) - if invalid_controls: - raise PublishValidationError( - "All the content in skeletonAnim_SET" - " should be transforms" - ) - - @classmethod - def validate_controls(self, set_members): - """Check if the controller set contains only accepted node types. - - Checks if all its set members are within the hierarchy of the root - Checks if the node types of the set members valid - - Args: - set_members: list of nodes of the skeleton_anim_set - hierarchy: list of nodes which reside under the root node - - Returns: - errors (list) - """ - - # Validate control types - invalid = [] - set_members = cmds.ls(set_members, long=True) - for node in set_members: - if cmds.nodeType(node) not in self.accepted_controllers: - invalid.append(node) - - return invalid diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py index 92b4922492..8574b3ecc8 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -1,30 +1,56 @@ +from maya import cmds + import pyblish.api + from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) +from ayon_core.hosts.maya.api.lib import is_visible class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): """Validate Arnold Scene Source. - We require at least 1 root node/parent for the meshes. 
This is to ensure we - can duplicate the nodes and preserve the names. + Ensure no nodes are hidden. + """ - If using proxies we need the nodes to share the same names and not be + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass", "assProxy"] + label = "Validate Arnold Scene Source" + + def process(self, instance): + # Validate against having nodes hidden, which will result in the + # extraction to ignore the node. + nodes = instance.data["members"] + instance.data.get("proxy", []) + nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')] + hidden_nodes = [ + x for x in nodes if not is_visible(x, intermediateObject=False) + ] + if hidden_nodes: + raise PublishValidationError( + "Found hidden nodes:\n\n{}\n\nPlease unhide for" + " publishing.".format("\n".join(hidden_nodes)) + ) + + +class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source Proxy. + + When using proxies we need the nodes to share the same names and not be parent to the world. This ends up needing at least two groups with content nodes and proxy nodes in another. """ order = ValidateContentsOrder hosts = ["maya"] - families = ["ass"] - label = "Validate Arnold Scene Source" + families = ["assProxy"] + label = "Validate Arnold Scene Source Proxy" def _get_nodes_by_name(self, nodes): ungrouped_nodes = [] nodes_by_name = {} parents = [] - same_named_nodes = {} for node in nodes: node_split = node.split("|") if len(node_split) == 2: @@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): parents.append(parent) node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - - # Check for same same nodes, which can happen in different - # hierarchies. - if node_name in nodes_by_name: - try: - same_named_nodes[node_name].append(node) - except KeyError: - same_named_nodes[node_name] = [ - nodes_by_name[node_name], node - ] - nodes_by_name[node_name] = node - if same_named_nodes: - message = "Found nodes with the same name:" - for name, nodes in same_named_nodes.items(): - message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes)) - - raise PublishValidationError(message) - return ungrouped_nodes, nodes_by_name, parents def process(self, instance): + # Validate against nodes directly parented to world. ungrouped_nodes = [] nodes, content_nodes_by_name, content_parents = ( - self._get_nodes_by_name(instance.data["contentMembers"]) + self._get_nodes_by_name(instance.data["members"]) ) ungrouped_nodes.extend(nodes) @@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): ) ungrouped_nodes.extend(nodes) - # Validate against nodes directly parented to world. if ungrouped_nodes: raise PublishValidationError( "Found nodes parented to the world: {}\n" "All nodes need to be grouped.".format(ungrouped_nodes) ) - # Proxy validation. - if not instance.data.get("proxy", []): - return - # Validate for content and proxy nodes amount being the same. 
- if len(instance.data["contentMembers"]) != len(instance.data["proxy"]): + if len(instance.data["members"]) != len(instance.data["proxy"]): raise PublishValidationError( "Amount of content nodes ({}) and proxy nodes ({}) needs to " - "be the same.".format( - len(instance.data["contentMembers"]), - len(instance.data["proxy"]) + "be the same.\nContent nodes: {}\nProxy nodes:{}".format( + len(instance.data["members"]), + len(instance.data["proxy"]), + instance.data["members"], + instance.data["proxy"] ) ) diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py index a9d896952d..e5dbe178fc 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py @@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin, order = ValidateContentsOrder hosts = ["maya"] - families = ["ass"] + families = ["assProxy"] label = "Validate Arnold Scene Source CBID" actions = [RepairAction] optional = False @@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin, @classmethod def get_invalid_couples(cls, instance): - content_nodes_by_name = cls._get_nodes_by_name( - instance.data["contentMembers"] - ) - proxy_nodes_by_name = cls._get_nodes_by_name( - instance.data.get("proxy", []) - ) + nodes_by_name = cls._get_nodes_by_name(instance.data["members"]) + proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"]) invalid_couples = [] - for content_name, content_node in content_nodes_by_name.items(): + for content_name, content_node in nodes_by_name.items(): proxy_node = proxy_nodes_by_name.get(content_name, None) if not proxy_node: @@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin, if not self.is_active(instance.data): return # Proxy validation. - if not instance.data.get("proxy", []): + if not instance.data["proxy"]: return # Validate for proxy nodes sharing the same cbId as content nodes. 
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py index 78a247b3f2..7badfdc027 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py @@ -10,6 +10,7 @@ from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError, + OptionalPyblishPluginMixin ) from ayon_core.hosts.maya.api import lib from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings @@ -37,7 +38,8 @@ def get_redshift_image_format_labels(): return mel.eval("{0}={0}".format(var)) -class ValidateRenderSettings(pyblish.api.InstancePlugin): +class ValidateRenderSettings(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates the global render settings * File Name Prefix must start with: `` @@ -55,7 +57,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): * Frame Padding must be: * default: 4 - * Animation must be toggle on, in Render Settings - Common tab: + * Animation must be toggled on, in Render Settings - Common tab: * vray: Animation on standard of specific * arnold: Frame / Animation ext: Any choice without "(Single Frame)" * redshift: Animation toggled on @@ -67,10 +69,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): """ order = ValidateContentsOrder - label = "Render Settings" + label = "Validate Render Settings" hosts = ["maya"] families = ["renderlayer"] actions = [RepairAction] + optional = True ImagePrefixes = { 'mentalray': 'defaultRenderGlobals.imageFilePrefix', @@ -112,6 +115,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): DEFAULT_PREFIX = "//_" def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..b07c7e9a70 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,132 @@ +from maya import cmds + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + PlaceholderLoadMixin, + LoadPlaceholderItem +) +from ayon_core.hosts.maya.api.lib import ( + get_container_transforms, + get_node_parent, + get_node_index_under_parent +) +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin, +) + + +class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "maya.load" + label = "Maya load" + + item_class = LoadPlaceholderItem + + def _create_placeholder_name(self, placeholder_data): + + # Split builder type: context_assets, linked_assets, all_assets + prefix, suffix = placeholder_data["builder_type"].split("_", 1) + parts = [prefix] + + # add family if any + placeholder_product_type = placeholder_data.get("product_type") + if placeholder_product_type is None: + placeholder_product_type = placeholder_data.get("family") + + if placeholder_product_type: + parts.append(placeholder_product_type) + + # add loader arguments if any + loader_args = placeholder_data["loader_args"] + if loader_args: + loader_args = eval(loader_args) + for value in loader_args.values(): + parts.append(str(value)) + + parts.append(suffix) + placeholder_name = "_".join(parts) + + return placeholder_name.capitalize() + + def _get_loaded_repre_ids(self): + 
loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + try: + containers = cmds.sets("AVALON_CONTAINERS", q=True) + except ValueError: + containers = [] + + loaded_representation_ids = { + cmds.getAttr(container + ".representation") + for container in containers + } + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def populate_placeholder(self, placeholder): + self.populate_load_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + repre_ids = self._get_loaded_repre_ids() + self.populate_load_placeholder(placeholder, repre_ids) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + self._parent_in_hierarchy(placeholder, container) + + def _parent_in_hierarchy(self, placeholder, container): + """Parent loaded container to placeholder's parent. + + ie : Set loaded content as placeholder's sibling + + Args: + container (str): Placeholder loaded containers + """ + + if not container: + return + + # TODO: This currently returns only a single root but a loaded scene + # could technically load more than a single root + container_root = get_container_transforms(container, root=True) + + # Bugfix: The get_container_transforms does not recognize the load + # reference group currently + # TODO: Remove this when it does + parent = get_node_parent(container_root) + if parent: + container_root = parent + roots = [container_root] + + # Add the loaded roots to the holding sets if they exist + holding_sets = cmds.listSets(object=placeholder.scene_identifier) or [] + for holding_set in holding_sets: + cmds.sets(roots, forceElement=holding_set) + + # Parent the roots to the place of the placeholder locator and match + # its matrix + placeholder_form = cmds.xform( + placeholder.scene_identifier, + query=True, + matrix=True, + worldSpace=True + ) + scene_parent = get_node_parent(placeholder.scene_identifier) + for node in set(roots): + cmds.xform(node, matrix=placeholder_form, worldSpace=True) + + if scene_parent != get_node_parent(node): + if scene_parent: + node = cmds.parent(node, scene_parent)[0] + else: + node = cmds.parent(node, world=True)[0] + + # Move loaded nodes in index order next to their placeholder node + cmds.reorder(node, back=True) + index = get_node_index_under_parent(placeholder.scene_identifier) + cmds.reorder(node, front=True) + cmds.reorder(node, relative=index + 1) diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py new file mode 100644 index 0000000000..62e10ba023 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py @@ -0,0 +1,201 @@ +from maya import cmds + +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin +) +from ayon_core.lib import NumberDef, TextDef, EnumDef +from ayon_core.lib.events import weakref_partial + + +EXAMPLE_SCRIPT = """ +# Access maya commands +from maya import cmds + +# Access the placeholder node +placeholder_node = placeholder.scene_identifier + +# Access the event callback +if event is None: + print(f"Populating {placeholder}") +else: + if event.topic == "template.depth_processed": + print(f"Processed depth: {event.get('depth')}") + elif event.topic == "template.finished": + 
print("Build finished.") +""".strip() + + +class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin): + """Execute a script at the given `order` during workfile build. + + This is a very low-level placeholder to run Python scripts at a given + point in time during the workfile template build. + + It can create either a locator or an objectSet as placeholder node. + It defaults to an objectSet, since allowing to run on e.g. other + placeholder node members can be useful, e.g. using: + + >>> members = cmds.sets(placeholder.scene_identifier, query=True) + + """ + + identifier = "maya.runscript" + label = "Run Python Script" + + use_selection_as_parent = False + + def get_placeholder_options(self, options=None): + options = options or {} + return [ + NumberDef( + "order", + label="Order", + default=options.get("order") or 0, + decimals=0, + minimum=0, + maximum=999, + tooltip=( + "Order" + "\nOrder defines asset loading priority (0 to 999)" + "\nPriority rule is : \"lowest is first to load\"." + ) + ), + TextDef( + "prepare_script", + label="Run at\nprepare", + tooltip="Run before populate at prepare order", + multiline=True, + default=options.get("prepare_script", "") + ), + TextDef( + "populate_script", + label="Run at\npopulate", + tooltip="Run script at populate node order
" + "This is the default behavior", + multiline=True, + default=options.get("populate_script", EXAMPLE_SCRIPT) + ), + TextDef( + "depth_processed_script", + label="Run after\ndepth\niteration", + tooltip="Run script after every build depth iteration", + multiline=True, + default=options.get("depth_processed_script", "") + ), + TextDef( + "finished_script", + label="Run after\nbuild", + tooltip=( + "Run script at build finished.
" + "Note: this even runs if other placeholders had " + "errors during the build" + ), + multiline=True, + default=options.get("finished_script", "") + ), + EnumDef( + "create_nodetype", + label="Nodetype", + items={ + "spaceLocator": "Locator", + "objectSet": "ObjectSet" + }, + tooltip=( + "The placeholder's node type to be created.
" + "Note this only works on create, not on update" + ), + default=options.get("create_nodetype", "objectSet") + ), + ] + + def create_placeholder(self, placeholder_data): + nodetype = placeholder_data.get("create_nodetype", "objectSet") + + if nodetype == "spaceLocator": + super(MayaPlaceholderScriptPlugin, self).create_placeholder( + placeholder_data + ) + elif nodetype == "objectSet": + placeholder_data["plugin_identifier"] = self.identifier + + # Create maya objectSet on selection + selection = cmds.ls(selection=True, long=True) + name = self._create_placeholder_name(placeholder_data) + node = cmds.sets(selection, name=name) + + self.imprint(node, placeholder_data) + + def prepare_placeholders(self, placeholders): + super(MayaPlaceholderScriptPlugin, self).prepare_placeholders( + placeholders + ) + for placeholder in placeholders: + prepare_script = placeholder.data.get("prepare_script") + if not prepare_script: + continue + + self.run_script(placeholder, prepare_script) + + def populate_placeholder(self, placeholder): + + populate_script = placeholder.data.get("populate_script") + depth_script = placeholder.data.get("depth_processed_script") + finished_script = placeholder.data.get("finished_script") + + # Run now + if populate_script: + self.run_script(placeholder, populate_script) + + if not any([depth_script, finished_script]): + # No callback scripts to run + if not placeholder.data.get("keep_placeholder", True): + self.delete_placeholder(placeholder) + return + + # Run at each depth processed + if depth_script: + callback = weakref_partial( + self.run_script, placeholder, depth_script) + self.builder.add_on_depth_processed_callback( + callback, order=placeholder.order) + + # Run at build finish + if finished_script: + callback = weakref_partial( + self.run_script, placeholder, finished_script) + self.builder.add_on_finished_callback( + callback, order=placeholder.order) + + # If placeholder should be deleted, delete it after finish so + # the scripts have access to it up to the last run + if not placeholder.data.get("keep_placeholder", True): + delete_callback = weakref_partial( + self.delete_placeholder, placeholder) + self.builder.add_on_finished_callback( + delete_callback, order=placeholder.order + 1) + + def run_script(self, placeholder, script, event=None): + """Run script + + Even though `placeholder` is an unused arguments by exposing it as + an input argument it means it makes it available through + globals()/locals() in the `exec` call, giving the script access + to the placeholder. + + For example: + >>> node = placeholder.scene_identifier + + In the case the script is running at a callback level (not during + populate) then it has access to the `event` as well, otherwise the + value is None if it runs during `populate_placeholder` directly. 
+
+        For example adding this as the callback script:
+        >>> if event is not None:
+        >>>     if event.topic == "template.depth_processed":
+        >>>         print(f"Processed depth: {event.get('depth')}")
+        >>>     elif event.topic == "template.finished":
+        >>>         print("Build finished.")
+
+        """
+        self.log.debug(f"Running script at event: {event}")
+        exec(script, locals())
diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
index 88ef4b201a..c1d9f019e4 100644
--- a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
+++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py
@@ -7,7 +7,7 @@ from maya import cmds
 import ayon_api

 from ayon_core.pipeline import get_current_project_name
-import ayon_core.hosts.maya.lib as maya_lib
+import ayon_core.hosts.maya.api.lib as maya_lib
 from . import lib
 from .alembic import get_alembic_ids_cache
diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py
index 78cbe85097..0a4755c166 100644
--- a/client/ayon_core/hosts/nuke/api/lib.py
+++ b/client/ayon_core/hosts/nuke/api/lib.py
@@ -43,7 +43,9 @@ from ayon_core.pipeline import (
 from ayon_core.pipeline.context_tools import (
     get_current_context_custom_workfile_template
 )
-from ayon_core.pipeline.colorspace import get_imageio_config
+from ayon_core.pipeline.colorspace import (
+    get_current_context_imageio_config_preset
+)
 from ayon_core.pipeline.workfile import BuildWorkfile
 from . import gizmo_menu
 from .constants import ASSIST
@@ -1495,18 +1497,28 @@ class WorkfileSettings(object):
         filter_knobs = [
             "viewerProcess",
-            "wipe_position"
+            "wipe_position",
+            "monitorOutOutputTransform"
         ]

+        display, viewer = get_viewer_config_from_string(
+            viewer_dict["viewerProcess"]
+        )
+        viewer_process = create_viewer_profile_string(
+            viewer, display, path_like=False
+        )
+        display, viewer = get_viewer_config_from_string(
+            viewer_dict["output_transform"]
+        )
+        output_transform = create_viewer_profile_string(
+            viewer, display, path_like=False
+        )
         erased_viewers = []
         for v in nuke.allNodes(filter="Viewer"):
             # set viewProcess to preset from settings
-            v["viewerProcess"].setValue(
-                str(viewer_dict["viewerProcess"])
-            )
+            v["viewerProcess"].setValue(viewer_process)

-            if str(viewer_dict["viewerProcess"]) \
-                    not in v["viewerProcess"].value():
+            if viewer_process not in v["viewerProcess"].value():
                 copy_inputs = v.dependencies()
                 copy_knobs = {k: v[k].value() for k in v.knobs()
                               if k not in filter_knobs}
@@ -1524,11 +1536,11 @@ class WorkfileSettings(object):

                 # set copied knobs
                 for k, v in copy_knobs.items():
-                    print(k, v)
                     nv[k].setValue(v)

                 # set viewerProcess
-                nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
+                nv["viewerProcess"].setValue(viewer_process)
+                nv["monitorOutOutputTransform"].setValue(output_transform)

         if erased_viewers:
             log.warning(
@@ -1542,12 +1554,8 @@ class WorkfileSettings(object):
             imageio_host (dict): host colorspace configurations
         '''

-        config_data = get_imageio_config(
-            project_name=get_current_project_name(),
-            host_name="nuke"
-        )
+        config_data = get_current_context_imageio_config_preset()

-        viewer_process_settings = imageio_host["viewer"]["viewerProcess"]
         workfile_settings = imageio_host["workfile"]
         color_management = workfile_settings["color_management"]
         native_ocio_config = workfile_settings["native_ocio_config"]
@@ -1574,29 +1582,6 @@
                 residual_path
             ))

-        # get monitor lut from settings respecting Nuke version differences
-
monitor_lut = workfile_settings["thumbnail_space"] - monitor_lut_data = self._get_monitor_settings( - viewer_process_settings, monitor_lut - ) - monitor_lut_data["workingSpaceLUT"] = ( - workfile_settings["working_space"] - ) - - # then set the rest - for knob, value_ in monitor_lut_data.items(): - # skip unfilled ocio config path - # it will be dict in value - if isinstance(value_, dict): - continue - # skip empty values - if not value_: - continue - if self._root_node[knob].value() not in value_: - self._root_node[knob].setValue(str(value_)) - log.debug("nuke.root()['{}'] changed to: {}".format( - knob, value_)) - # set ocio config path if config_data: config_path = config_data["path"].replace("\\", "/") @@ -1611,6 +1596,31 @@ class WorkfileSettings(object): if correct_settings: self._set_ocio_config_path_to_workfile(config_data) + # get monitor lut from settings respecting Nuke version differences + monitor_lut_data = self._get_monitor_settings( + workfile_settings["monitor_out_lut"], + workfile_settings["monitor_lut"] + ) + monitor_lut_data.update({ + "workingSpaceLUT": workfile_settings["working_space"], + "int8Lut": workfile_settings["int_8_lut"], + "int16Lut": workfile_settings["int_16_lut"], + "logLut": workfile_settings["log_lut"], + "floatLut": workfile_settings["float_lut"] + }) + + # then set the rest + for knob, value_ in monitor_lut_data.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value_, dict): + continue + # skip empty values + if not value_: + continue + self._root_node[knob].setValue(str(value_)) + log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_)) + def _get_monitor_settings(self, viewer_lut, monitor_lut): """ Get monitor settings from viewer and monitor lut diff --git a/client/ayon_core/hosts/nuke/api/pipeline.py b/client/ayon_core/hosts/nuke/api/pipeline.py index 0d44aba2f9..d35a2e89e0 100644 --- a/client/ayon_core/hosts/nuke/api/pipeline.py +++ b/client/ayon_core/hosts/nuke/api/pipeline.py @@ -18,6 +18,7 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, register_inventory_action_path, + register_workfile_build_plugin_path, AYON_INSTANCE_ID, AVALON_INSTANCE_ID, AVALON_CONTAINER_ID, @@ -52,8 +53,6 @@ from .lib import ( MENU_LABEL, ) from .workfile_template_builder import ( - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin, build_workfile_template, create_placeholder, update_placeholder, @@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") # registering pyblish gui regarding settings in presets if os.getenv("PYBLISH_GUI", None): @@ -105,18 +105,11 @@ class NukeHost( def get_workfile_extensions(self): return file_extensions() - def get_workfile_build_placeholder_plugins(self): - return [ - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin - ] - def get_containers(self): return ls() def install(self): - ''' Installing all requarements for Nuke host - ''' + """Installing all requirements for Nuke host""" pyblish.api.register_host("nuke") @@ -125,6 +118,7 @@ class NukeHost( register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) # Register AYON event for workfiles loading. 
register_event_callback("workio.open_file", check_inventory_versions) @@ -178,7 +172,6 @@ def add_nuke_callbacks(): # set apply all workfile settings on script load and save nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) - if nuke_settings["dirmap"]["enabled"]: log.info("Added Nuke's dir-mapping callback ...") # Add dirmap for file paths. diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py index 5b97fab0c2..ec13104d4d 100644 --- a/client/ayon_core/hosts/nuke/api/plugin.py +++ b/client/ayon_core/hosts/nuke/api/plugin.py @@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview): # deal with now lut defined in viewer lut self.viewer_lut_raw = klass.viewer_lut_raw self.write_colorspace = instance.data["colorspace"] + self.color_channels = instance.data["color_channels"] self.name = name or "baked" self.ext = ext or "mov" @@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview): self.log.info("Nodes exported...") return path - def generate_mov(self, farm=False, **kwargs): + def generate_mov(self, farm=False, delete=True, **kwargs): # colorspace data colorspace = None # get colorspace settings @@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview): self.log.debug("Path: {}".format(self.path)) write_node["file"].setValue(str(self.path)) write_node["file_type"].setValue(str(self.ext)) + write_node["channels"].setValue(str(self.color_channels)) + # Knobs `meta_codec` and `mov64_codec` are not available on centos. # TODO shouldn't this come from settings on outputs? try: @@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview): self.render(write_node.name()) # ---------- generate representation data + tags = ["review", "need_thumbnail"] + + if delete: + tags.append("delete") + self.get_representation_data( - tags=["review", "need_thumbnail", "delete"] + add_tags, + tags=tags + add_tags, custom_tags=add_custom_tags, range=True, colorspace=colorspace @@ -1151,7 +1159,6 @@ def _remove_old_knobs(node): "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" ] - print(node.name()) # remove all old knobs for knob in node.allKnobs(): diff --git a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py index 495edd9e5f..aebf91c4a4 100644 --- a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py @@ -1,30 +1,17 @@ import collections import nuke + from ayon_core.pipeline import registered_host from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin, ) from ayon_core.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) from .lib import ( - find_free_space_to_paste_nodes, - get_extreme_positions, - get_group_io_nodes, imprint, - refresh_node, - refresh_nodes, reset_selection, - get_names_from_nodes, - get_nodes_by_names, - select_nodes, - duplicate_node, - node_tempfile, get_main_window, WorkfileSettings, ) @@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder): return True + class NukePlaceholderPlugin(PlaceholderPlugin): node_color = 4278190335 @@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin): nuke.delete(placeholder_node) -class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): - identifier = 
"nuke.load" - label = "Nuke load" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderLoadPlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" - ) - if loaded_representation_ids is None: - loaded_representation_ids = set() - for node in nuke.allNodes(): - if "repre_id" in node.knobs(): - loaded_representation_ids.add( - node.knob("repre_id").getValue() - ) - - self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids - ) - return loaded_representation_ids - - def _before_placeholder_load(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def _before_repre_load(self, placeholder, representation): - placeholder.data["last_repre_id"] = representation["id"] - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - # TODO do data validations and maybe updgrades if are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. - """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - # TODO get from shared populate data! 
- nodes_init = placeholder.data["nodes_init"] - nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Loaded nodes: {}".format(nodes_loaded)) - if not nodes_loaded: - return - - placeholder.data["delete"] = True - - nodes_loaded = self._move_to_placeholder_group( - placeholder, nodes_loaded - ) - placeholder.data["last_loaded"] = nodes_loaded - refresh_nodes(nodes_loaded) - - # positioning of the loaded nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_loaded) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_loaded) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of loaded nodes - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) - - self._set_loaded_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_loaded) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_loaded, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the loaded - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_loaded, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_loaded): - """ - opening the placeholder's group and copying loaded nodes in it. 
- - Returns : - nodes_loaded (list): the new list of pasted nodes - """ - - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_loaded) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_loaded = nuke.selectedNodes() - return nodes_loaded - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is loaded.""" - - nodes_loaded = placeholder.data["last_loaded"] - loaded_backdrops = [] - bd_orders = set() - for node in nodes_loaded: - if isinstance(node, nuke.BackdropNode): - loaded_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in loaded_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes loaded with it) - - add Id to the attributes of all the other nodes - """ - - loaded_nodes = placeholder.data["last_loaded"] - loaded_nodes_set = set(loaded_nodes) - data = {"repre_id": str(placeholder.data["last_repre_id"])} - - for node in loaded_nodes: - node_knobs = node.knobs() - if "builder_type" not in node_knobs: - # save the id of representation for all imported nodes - imprint(node, data) - node.knob("repre_id").setVisible(False) - refresh_node(node) - continue - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(loaded_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_loaded_connections(self, placeholder): - """ - set inputs and outputs of loaded nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - loaded with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - -class NukePlaceholderCreatePlugin( - NukePlaceholderPlugin, PlaceholderCreateMixin -): - identifier = "nuke.create" - label = "Nuke create" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderCreatePlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _before_instance_create(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - - output.append( - CreatePlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. 
- """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - nodes_init = placeholder.data["nodes_init"] - nodes_created = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Created nodes: {}".format(nodes_created)) - if not nodes_created: - return - - placeholder.data["delete"] = True - - nodes_created = self._move_to_placeholder_group( - placeholder, nodes_created - ) - placeholder.data["last_created"] = nodes_created - refresh_nodes(nodes_created) - - # positioning of the created nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_created) - for node in nodes_created: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_created) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of created nodes - - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_created) - - self._set_created_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new created nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_created) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_created, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the created - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_created, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_created: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_created): - """ - opening the placeholder's group and copying created nodes in it. 
- - Returns : - nodes_created (list): the new list of pasted nodes - """ - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_created) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_created = nuke.selectedNodes() - return nodes_created - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is create.""" - - nodes_created = placeholder.data["last_created"] - created_backdrops = [] - bd_orders = set() - for node in nodes_created: - if isinstance(node, nuke.BackdropNode): - created_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in created_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes created with it) - - add Id to the attributes of all the other nodes - """ - - created_nodes = placeholder.data["last_created"] - created_nodes_set = set(created_nodes) - - for node in created_nodes: - node_knobs = node.knobs() - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(created_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_created_connections(self, placeholder): - """ - set inputs and outputs of created nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_created"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - created with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_created"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - def build_workfile_template(*args, **kwargs): builder = NukeTemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py index 7d823919dc..50af8a4eb9 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_backdrop.py @@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin): } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin): "colorspaceInput": colorspace, } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # adding nodes to node graph diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py index 14c54c3adc..3c7d4f3bb2 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_camera_abc.py @@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin): "frameEnd": last, "version": version_entity["version"], } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin): } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py index df8f2ab018..7fa90da86f 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_clip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_clip.py @@ -9,7 +9,8 @@ from ayon_core.pipeline import ( get_representation_path, ) from ayon_core.pipeline.colorspace import ( - get_imageio_file_rules_colorspace_from_filepath + get_imageio_file_rules_colorspace_from_filepath, + get_current_context_imageio_config_preset, ) from ayon_core.hosts.nuke.api.lib import ( get_imageio_input_colorspace, @@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader): "frameStart", "frameEnd", "source", - "author", "fps", "handleStart", "handleEnd", @@ -347,8 +347,7 @@ class 
LoadClip(plugin.NukeLoader): "source": version_attributes.get("source"), "handleStart": str(self.handle_start), "handleEnd": str(self.handle_end), - "fps": str(version_attributes.get("fps")), - "author": version_attributes.get("author") + "fps": str(version_attributes.get("fps")) } last_version_entity = ayon_api.get_last_version_by_product_id( @@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader): f"Colorspace from representation colorspaceData: {colorspace}" ) + config_data = get_current_context_imageio_config_preset() # check if any filerules are not applicable new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath( # noqa - filepath, "nuke", project_name + filepath, "nuke", project_name, config_data=config_data ) self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}") diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py index a87c81295a..be7420fcf0 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_effects.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects.py @@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps", ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py index 8fa1347598..9bb430b37b 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_effects_ip.py @@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py index 95f85bacfc..57d00795ae 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo.py @@ -71,7 +71,6 @@ class LoadGizmo(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -139,7 +138,6 @@ class LoadGizmo(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py index 3112e27811..ed2b1ec458 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -73,7 +73,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -145,7 +144,6 @@ class LoadGizmoInputProcess(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_image.py b/client/ayon_core/hosts/nuke/plugins/load/load_image.py index d825b621fc..b5fccd8a0d 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_image.py +++ 
b/client/ayon_core/hosts/nuke/plugins/load/load_image.py @@ -133,7 +133,7 @@ class LoadImage(load.LoaderPlugin): "version": version_entity["version"], "colorspace": colorspace, } - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes.get(k, str(None)) r["tile_color"].setValue(int("0x4ecd25ff", 16)) @@ -207,7 +207,6 @@ class LoadImage(load.LoaderPlugin): "colorspace": version_attributes.get("colorSpace"), "source": version_attributes.get("source"), "fps": str(version_attributes.get("fps")), - "author": version_attributes.get("author") } # change color of node diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_model.py b/client/ayon_core/hosts/nuke/plugins/load/load_model.py index 0326e0a4fc..40862cd1e0 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_model.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_model.py @@ -47,7 +47,7 @@ class AlembicModelLoader(load.LoaderPlugin): "version": version_entity["version"] } # add attributes from the version to imprint to metadata knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path @@ -130,7 +130,7 @@ class AlembicModelLoader(load.LoaderPlugin): } # add additional metadata from the version to imprint to Avalon knob - for k in ["source", "author", "fps"]: + for k in ["source", "fps"]: data_imprint[k] = version_attributes[k] # getting file path diff --git a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py index 3e554f9d3b..d6699be164 100644 --- a/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py +++ b/client/ayon_core/hosts/nuke/plugins/load/load_script_precomp.py @@ -55,7 +55,6 @@ class LinkAsGroup(load.LoaderPlugin): "handleStart", "handleEnd", "source", - "author", "fps" ]: data_imprint[k] = version_attributes[k] @@ -131,7 +130,6 @@ class LinkAsGroup(load.LoaderPlugin): "colorspace": version_attributes.get("colorSpace"), "source": version_attributes.get("source"), "fps": version_attributes.get("fps"), - "author": version_attributes.get("author") } # Update the imprinted representation diff --git a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py index 745351dc49..27525bcad1 100644 --- a/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/collect_writes.py @@ -153,6 +153,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin, # Determine defined file type ext = write_node["file_type"].value() + # determine defined channel type + color_channels = write_node["channels"].value() + # get frame range data handle_start = instance.context.data["handleStart"] handle_end = instance.context.data["handleEnd"] @@ -172,7 +175,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin, "path": write_file_path, "outputDir": output_dir, "ext": ext, - "colorspace": colorspace + "colorspace": colorspace, + "color_channels": color_channels }) if product_type == "render": diff --git a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py index 8d7a3ec311..82c7b6e4c5 100644 --- a/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py +++ b/client/ayon_core/hosts/nuke/plugins/publish/extract_review_intermediates.py @@ -136,11 +136,16 @@ class 
ExtractReviewIntermediates(publish.Extractor): self, instance, o_name, o_data["extension"], multiple_presets) + o_data["add_custom_tags"].append("intermediate") + delete = not o_data.get("publish", False) + if instance.data.get("farm"): if "review" in instance.data["families"]: instance.data["families"].remove("review") - data = exporter.generate_mov(farm=True, **o_data) + data = exporter.generate_mov( + farm=True, delete=delete, **o_data + ) self.log.debug( "_ data: {}".format(data)) @@ -154,7 +159,7 @@ class ExtractReviewIntermediates(publish.Extractor): "bakeWriteNodeName": data.get("bakeWriteNodeName") }) else: - data = exporter.generate_mov(**o_data) + data = exporter.generate_mov(delete=delete, **o_data) # add representation generated by exporter generated_repres.extend(data["representations"]) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..a5490021e4 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,428 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderCreatePlugin( + NukePlaceholderPlugin, PlaceholderCreateMixin +): + identifier = "nuke.create" + label = "Nuke create" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderCreatePlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _before_instance_create(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + plugin_identifier_knob = node.knob("plugin_identifier") + if ( + plugin_identifier_knob is None + or plugin_identifier_knob.getValue() != self.identifier + ): + continue + + placeholder_data = self._parse_placeholder_node_data(node) + + output.append( + CreatePlaceholderItem(node_name, placeholder_data, self) + ) + + return output + + def populate_placeholder(self, placeholder): + self.populate_create_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + self.populate_create_placeholder(placeholder) + + def get_placeholder_options(self, options=None): + return self.get_create_plugin_options(options) + + def post_placeholder_process(self, placeholder, failed): + """Cleanup placeholder after load of its corresponding representations. 
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Created nodes: {}".format(nodes_created))
+        if not nodes_created:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_created = self._move_to_placeholder_group(
+            placeholder, nodes_created
+        )
+        placeholder.data["last_created"] = nodes_created
+        refresh_nodes(nodes_created)
+
+        # positioning of the created nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_created)
+        for node in nodes_created:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_created)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of created nodes
+
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
+
+            self._set_created_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the newly created
+            # nodes, set their inputs and outputs and update all nodes
+            # positions, dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_created)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_created,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the created
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_created, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_created:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_created):
+        """Open the placeholder's group and copy created nodes into it.
+
+        Returns:
+            list: The new list of pasted nodes.
+        """
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_created)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_created = nuke.selectedNodes()
+        return nodes_created
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is created."""
+
+        nodes_created = placeholder.data["last_created"]
+        created_backdrops = []
+        bd_orders = set()
+        for node in nodes_created:
+            if isinstance(node, nuke.BackdropNode):
+                created_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in created_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """Imprint sibling information into node attributes.
+
+        - add siblings names to placeholder attributes (nodes created with it)
+        - add id to the attributes of all the other nodes
+        """
+
+        created_nodes = placeholder.data["last_created"]
+        created_nodes_set = set(created_nodes)
+
+        for node in created_nodes:
+            node_knobs = node.knobs()
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(created_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes."""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Only the sizes of 'considered_nodes' are taken into account.
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder that is being
+                processed.
+            nodes (list): List of nodes to update.
+            considered_nodes (list): List of nodes to consider while updating
+                positions and dimensions.
+            offset_y (Optional[int]): Vertical distance between copies.
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_created_connections(self, placeholder):
+        """Set inputs and outputs of the created nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_created"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the ones that were
+        created with it) for the newly added nodes.
+
+        Returns:
+            dict: Copied nodes names and their copies.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
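+            placeholder (PlaceholderItem): Placeholder the copied siblings
+                belong to.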
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_created"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..258f48c9d3 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,455 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): + identifier = "nuke.load" + label = "Nuke load" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderLoadPlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _get_loaded_repre_ids(self): + loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + loaded_representation_ids = set() + for node in nuke.allNodes(): + if "repre_id" in node.knobs(): + loaded_representation_ids.add( + node.knob("repre_id").getValue() + ) + + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def _before_placeholder_load(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def _before_repre_load(self, placeholder, representation): + placeholder.data["last_repre_id"] = representation["id"] + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + 
plugin_identifier_knob = node.knob("plugin_identifier")
+            if (
+                plugin_identifier_knob is None
+                or plugin_identifier_knob.getValue() != self.identifier
+            ):
+                continue
+
+            placeholder_data = self._parse_placeholder_node_data(node)
+            # TODO do data validations and maybe upgrades if they are invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        # TODO get from shared populate data!
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Loaded nodes: {}".format(nodes_loaded))
+        if not nodes_loaded:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_loaded = self._move_to_placeholder_group(
+            placeholder, nodes_loaded
+        )
+        placeholder.data["last_loaded"] = nodes_loaded
+        refresh_nodes(nodes_loaded)
+
+        # positioning of the loaded nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
+        for node in nodes_loaded:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_loaded)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of loaded nodes
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
+
+            self._set_loaded_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the newly loaded
+            # nodes, set their inputs and outputs and update all nodes
+            # positions, dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_loaded)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_loaded,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the loaded
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_loaded, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_loaded:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_loaded):
+        """Open the placeholder's group and copy loaded nodes into it.
+
+        Returns:
+            list: The new list of pasted nodes.
+        """
+
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_loaded)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_loaded = nuke.selectedNodes()
+        return nodes_loaded
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is loaded."""
+
+        nodes_loaded = placeholder.data["last_loaded"]
+        loaded_backdrops = []
+        bd_orders = set()
+        for node in nodes_loaded:
+            if isinstance(node, nuke.BackdropNode):
+                loaded_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in loaded_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """Imprint sibling information into node attributes.
+
+        - add siblings names to placeholder attributes (nodes loaded with it)
+        - add id to the attributes of all the other nodes
+        """
+
+        loaded_nodes = placeholder.data["last_loaded"]
+        loaded_nodes_set = set(loaded_nodes)
+        data = {"repre_id": str(placeholder.data["last_repre_id"])}
+
+        for node in loaded_nodes:
+            node_knobs = node.knobs()
+            if "builder_type" not in node_knobs:
+                # save the id of representation for all imported nodes
+                imprint(node, data)
+                node.knob("repre_id").setVisible(False)
+                refresh_node(node)
+                continue
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(loaded_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes."""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Only the sizes of 'considered_nodes' are taken into account.
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder that is being
+                processed.
+            nodes (list): List of nodes to update.
+            considered_nodes (list): List of nodes to consider while updating
+                positions and dimensions.
+            offset_y (Optional[int]): Vertical distance between copies.
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_loaded_connections(self, placeholder):
+        """Set inputs and outputs of the loaded nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_loaded"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the ones that were
+        loaded with it) for the newly added nodes.
+
+        Returns:
+            dict: Copied nodes names and their copies.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
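+            placeholder (PlaceholderItem): Placeholder the copied siblings
+                belong to.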
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_loaded"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py index 26f2469844..a44c3490c6 100644 --- a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py @@ -35,8 +35,12 @@ class ImageCreator(Creator): create_empty_group = False stub = api.stub() # only after PS is up - top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): + try: + top_level_selected_items = stub.get_selected_layers() + except ValueError: + raise CreatorError("Cannot group locked Background layer!") + only_single_item_selected = len(top_level_selected_items) == 1 if ( only_single_item_selected or @@ -50,11 +54,12 @@ class ImageCreator(Creator): group = stub.group_selected_layers(product_name_from_ui) groups_to_create.append(group) else: - stub.select_layers(stub.get_layers()) try: + stub.select_layers(stub.get_layers()) group = stub.group_selected_layers(product_name_from_ui) - except: + except ValueError: raise CreatorError("Cannot group locked Background layer!") + groups_to_create.append(group) # create empty group if nothing selected diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py index b43792a357..2762172936 100644 --- a/client/ayon_core/hosts/traypublisher/csv_publish.py +++ b/client/ayon_core/hosts/traypublisher/csv_publish.py @@ -1,5 +1,3 @@ -import os - import pyblish.api import pyblish.util diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py index 4d865c1c5c..da05afe86b 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_colorspace_look.py @@ -156,14 +156,9 @@ This creator publishes color space look file (LUT). 
] def apply_settings(self, project_settings): - host = self.create_context.host - host_name = host.name - project_name = host.get_current_project_name() - config_data = colorspace.get_imageio_config( - project_name, host_name, + config_data = colorspace.get_current_context_imageio_config_preset( project_settings=project_settings ) - if not config_data: self.enabled = False return diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py new file mode 100644 index 0000000000..82b109be28 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_editorial_package.py @@ -0,0 +1,96 @@ +from pathlib import Path + +from ayon_core.pipeline import ( + CreatedInstance, +) + +from ayon_core.lib.attribute_definitions import ( + FileDef, + BoolDef, + TextDef, +) +from ayon_core.hosts.traypublisher.api.plugin import TrayPublishCreator + + +class EditorialPackageCreator(TrayPublishCreator): + """Creates instance for OTIO file from published folder. + + Folder contains OTIO file and exported .mov files. Process should publish + whole folder as single `editorial_pckg` product type and (possibly) convert + .mov files into different format and copy them into `publish` `resources` + subfolder. + """ + identifier = "editorial_pckg" + label = "Editorial package" + product_type = "editorial_pckg" + description = "Publish folder with OTIO file and resources" + + # Position batch creator after simple creators + order = 120 + + conversion_enabled = False + + def apply_settings(self, project_settings): + self.conversion_enabled = ( + project_settings["traypublisher"] + ["publish"] + ["ExtractEditorialPckgConversion"] + ["conversion_enabled"] + ) + + def get_icon(self): + return "fa.folder" + + def create(self, product_name, instance_data, pre_create_data): + folder_path = pre_create_data.get("folder_path") + if not folder_path: + return + + instance_data["creator_attributes"] = { + "folder_path": (Path(folder_path["directory"]) / + Path(folder_path["filenames"][0])).as_posix(), + "conversion_enabled": pre_create_data["conversion_enabled"] + } + + # Create new instance + new_instance = CreatedInstance(self.product_type, product_name, + instance_data, self) + self._store_new_instance(new_instance) + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return [ + FileDef( + "folder_path", + folders=True, + single_item=True, + extensions=[], + allow_sequences=False, + label="Folder path" + ), + BoolDef("conversion_enabled", + tooltip="Convert to output defined in Settings.", + default=self.conversion_enabled, + label="Convert resources"), + ] + + def get_instance_attr_defs(self): + return [ + TextDef( + "folder_path", + label="Folder path", + disabled=True + ), + BoolDef("conversion_enabled", + tooltip="Convert to output defined in Settings.", + label="Convert resources"), + ] + + def get_detail_description(self): + return """# Publish folder with OTIO file and video clips + + Folder contains OTIO file and exported .mov files. Process should + publish whole folder as single `editorial_pckg` product type and + (possibly) convert .mov files into different format and copy them into + `publish` `resources` subfolder. 
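+
+        Conversion of the resources is optional: it is controlled by the
+        `Convert resources` attribute, whose default comes from the
+        `ExtractEditorialPckgConversion` project settings.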
+ """ diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py new file mode 100644 index 0000000000..cb1277546c --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_editorial_package.py @@ -0,0 +1,58 @@ +"""Produces instance.data["editorial_pckg"] data used during integration. + +Requires: + instance.data["creator_attributes"]["path"] - from creator + +Provides: + instance -> editorial_pckg (dict): + folder_path (str) + otio_path (str) - from dragged folder + resource_paths (list) + +""" +import os + +import pyblish.api + +from ayon_core.lib.transcoding import VIDEO_EXTENSIONS + + +class CollectEditorialPackage(pyblish.api.InstancePlugin): + """Collects path to OTIO file and resources""" + + label = "Collect Editorial Package" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial_pckg"] + + def process(self, instance): + folder_path = instance.data["creator_attributes"]["folder_path"] + if not folder_path or not os.path.exists(folder_path): + self.log.info(( + "Instance doesn't contain collected existing folder path." + )) + return + + instance.data["editorial_pckg"] = {} + instance.data["editorial_pckg"]["folder_path"] = folder_path + + otio_path, resource_paths = ( + self._get_otio_and_resource_paths(folder_path)) + + instance.data["editorial_pckg"]["otio_path"] = otio_path + instance.data["editorial_pckg"]["resource_paths"] = resource_paths + + def _get_otio_and_resource_paths(self, folder_path): + otio_path = None + resource_paths = [] + + file_names = os.listdir(folder_path) + for filename in file_names: + _, ext = os.path.splitext(filename) + file_path = os.path.join(folder_path, filename) + if ext == ".otio": + otio_path = file_path + elif ext in VIDEO_EXTENSIONS: + resource_paths.append(file_path) + return otio_path, resource_paths diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py index 8e29a0048d..5fbb9a6f4c 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_explicit_colorspace.py @@ -1,10 +1,7 @@ import pyblish.api -from ayon_core.pipeline import ( - publish, - registered_host -) from ayon_core.lib import EnumDef from ayon_core.pipeline import colorspace +from ayon_core.pipeline import publish from ayon_core.pipeline.publish import KnownPublishError @@ -19,9 +16,10 @@ class CollectColorspace(pyblish.api.InstancePlugin, families = ["render", "plate", "reference", "image", "online"] enabled = False - colorspace_items = [ + default_colorspace_items = [ (None, "Don't override") ] + colorspace_items = list(default_colorspace_items) colorspace_attr_show = False config_items = None @@ -69,14 +67,13 @@ class CollectColorspace(pyblish.api.InstancePlugin, @classmethod def apply_settings(cls, project_settings): - host = registered_host() - host_name = host.name - project_name = host.get_current_project_name() - config_data = colorspace.get_imageio_config( - project_name, host_name, + config_data = colorspace.get_current_context_imageio_config_preset( project_settings=project_settings ) + enabled = False + colorspace_items = list(cls.default_colorspace_items) + config_items = None if config_data: filepath = config_data["path"] config_items = 
colorspace.get_ocio_config_colorspaces(filepath) @@ -85,9 +82,11 @@ class CollectColorspace(pyblish.api.InstancePlugin, include_aliases=True, include_roles=True ) - cls.config_items = config_items - cls.colorspace_items.extend(labeled_colorspaces) - cls.enabled = True + colorspace_items.extend(labeled_colorspaces) + + cls.config_items = config_items + cls.colorspace_items = colorspace_items + cls.enabled = enabled @classmethod def get_attribute_defs(cls): diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py similarity index 64% rename from client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py rename to client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py index 4d203649c7..2e564a2e4e 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_asset_entity.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_frame_data_from_folder_entity.py @@ -10,9 +10,13 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.491 label = "Collect Missing Frame Data From Folder" - families = ["plate", "pointcache", - "vdbcache", "online", - "render"] + families = [ + "plate", + "pointcache", + "vdbcache", + "online", + "render", + ] hosts = ["traypublisher"] def process(self, instance): @@ -22,16 +26,26 @@ class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin): "frameStart", "frameEnd", "handleStart", - "handleEnd" + "handleEnd", ): if key not in instance.data: missing_keys.append(key) + + # Skip the logic if all keys are already collected. + # NOTE: In editorial is not 'folderEntity' filled, so it would crash + # even if we don't need it. + if not missing_keys: + return + keys_set = [] folder_attributes = instance.data["folderEntity"]["attrib"] for key in missing_keys: if key in folder_attributes: instance.data[key] = folder_attributes[key] keys_set.append(key) + if keys_set: - self.log.debug(f"Frame range data {keys_set} " - "has been collected from folder entity.") + self.log.debug( + f"Frame range data {keys_set} " + "has been collected from folder entity." + ) diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py new file mode 100644 index 0000000000..6dd4e84704 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_editorial_pckg.py @@ -0,0 +1,232 @@ +import copy +import os.path +import subprocess + +import opentimelineio + +import pyblish.api + +from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess +from ayon_core.pipeline import publish + + +class ExtractEditorialPckgConversion(publish.Extractor): + """Replaces movie paths in otio file with publish rootless + + Prepares movie resources for integration (adds them to `transfers`). + Converts .mov files according to output definition. 
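+
+    The target extension and ffmpeg arguments are read from the
+    'ExtractEditorialPckgConversion' output definition in project Settings.
+    Conversion may be disabled per instance via 'conversion_enabled'.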
+ """ + + label = "Extract Editorial Package" + order = pyblish.api.ExtractorOrder - 0.45 + hosts = ["traypublisher"] + families = ["editorial_pckg"] + + def process(self, instance): + editorial_pckg_data = instance.data.get("editorial_pckg") + + otio_path = editorial_pckg_data["otio_path"] + otio_basename = os.path.basename(otio_path) + staging_dir = self.staging_dir(instance) + + editorial_pckg_repre = { + 'name': "editorial_pckg", + 'ext': "otio", + 'files': otio_basename, + "stagingDir": staging_dir, + } + otio_staging_path = os.path.join(staging_dir, otio_basename) + + instance.data["representations"].append(editorial_pckg_repre) + + publish_resource_folder = self._get_publish_resource_folder(instance) + resource_paths = editorial_pckg_data["resource_paths"] + transfers = self._get_transfers(resource_paths, + publish_resource_folder) + + project_settings = instance.context.data["project_settings"] + output_def = (project_settings["traypublisher"] + ["publish"] + ["ExtractEditorialPckgConversion"] + ["output"]) + + conversion_enabled = (instance.data["creator_attributes"] + ["conversion_enabled"]) + + if conversion_enabled and output_def["ext"]: + transfers = self._convert_resources(output_def, transfers) + + instance.data["transfers"] = transfers + + source_to_rootless = self._get_resource_path_mapping(instance, + transfers) + + otio_data = editorial_pckg_data["otio_data"] + otio_data = self._replace_target_urls(otio_data, source_to_rootless) + + opentimelineio.adapters.write_to_file(otio_data, otio_staging_path) + + self.log.info("Added Editorial Package representation: {}".format( + editorial_pckg_repre)) + + def _get_publish_resource_folder(self, instance): + """Calculates publish folder and create it.""" + publish_path = self._get_published_path(instance) + publish_folder = os.path.dirname(publish_path) + publish_resource_folder = os.path.join(publish_folder, "resources") + + if not os.path.exists(publish_resource_folder): + os.makedirs(publish_resource_folder, exist_ok=True) + return publish_resource_folder + + def _get_resource_path_mapping(self, instance, transfers): + """Returns dict of {source_mov_path: rootless_published_path}.""" + replace_paths = {} + anatomy = instance.context.data["anatomy"] + for source, destination in transfers: + rootless_path = self._get_rootless(anatomy, destination) + source_file_name = os.path.basename(source) + replace_paths[source_file_name] = rootless_path + return replace_paths + + def _get_transfers(self, resource_paths, publish_resource_folder): + """Returns list of tuples (source, destination) with movie paths.""" + transfers = [] + for res_path in resource_paths: + res_basename = os.path.basename(res_path) + pub_res_path = os.path.join(publish_resource_folder, res_basename) + transfers.append((res_path, pub_res_path)) + return transfers + + def _replace_target_urls(self, otio_data, replace_paths): + """Replace original movie paths with published rootless ones.""" + for track in otio_data.tracks: + for clip in track: + # Check if the clip has a media reference + if clip.media_reference is not None: + # Access the target_url from the media reference + target_url = clip.media_reference.target_url + if not target_url: + continue + file_name = os.path.basename(target_url) + replace_path = replace_paths.get(file_name) + if replace_path: + clip.media_reference.target_url = replace_path + if clip.name == file_name: + clip.name = os.path.basename(replace_path) + + return otio_data + + def _get_rootless(self, anatomy, path): + """Try to find 
rootless {root[work]} path from `path`""" + success, rootless_path = anatomy.find_root_template_from_path( + path) + if not success: + # `rootless_path` is not set to `output_dir` if none of roots match + self.log.warning( + f"Could not find root path for remapping '{path}'." + ) + rootless_path = path + + return rootless_path + + def _get_published_path(self, instance): + """Calculates expected `publish` folder""" + # determine published path from Anatomy. + template_data = instance.data.get("anatomyData") + rep = instance.data["representations"][0] + template_data["representation"] = rep.get("name") + template_data["ext"] = rep.get("ext") + template_data["comment"] = None + + anatomy = instance.context.data["anatomy"] + template_data["root"] = anatomy.roots + template = anatomy.get_template_item("publish", "default", "path") + template_filled = template.format_strict(template_data) + return os.path.normpath(template_filled) + + def _convert_resources(self, output_def, transfers): + """Converts all resource files to configured format.""" + out_extension = output_def["ext"] + if not out_extension: + self.log.warning("No output extension configured in " + "ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion") # noqa + return transfers + + final_transfers = [] + out_def_ffmpeg_args = output_def["ffmpeg_args"] + ffmpeg_input_args = [ + value.strip() + for value in out_def_ffmpeg_args["input"] + if value.strip() + ] + ffmpeg_video_filters = [ + value.strip() + for value in out_def_ffmpeg_args["video_filters"] + if value.strip() + ] + ffmpeg_audio_filters = [ + value.strip() + for value in out_def_ffmpeg_args["audio_filters"] + if value.strip() + ] + ffmpeg_output_args = [ + value.strip() + for value in out_def_ffmpeg_args["output"] + if value.strip() + ] + ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args) + + generic_args = [ + subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg")) + ] + generic_args.extend(ffmpeg_input_args) + if ffmpeg_video_filters: + generic_args.append("-filter:v") + generic_args.append( + "\"{}\"".format(",".join(ffmpeg_video_filters))) + + if ffmpeg_audio_filters: + generic_args.append("-filter:a") + generic_args.append( + "\"{}\"".format(",".join(ffmpeg_audio_filters))) + + for source, destination in transfers: + base_name = os.path.basename(destination) + file_name, ext = os.path.splitext(base_name) + dest_path = os.path.join(os.path.dirname(destination), + f"{file_name}.{out_extension}") + final_transfers.append((source, dest_path)) + + all_args = copy.deepcopy(generic_args) + all_args.append(f"-i \"{source}\"") + all_args.extend(ffmpeg_output_args) # order matters + all_args.append(f"\"{dest_path}\"") + subprcs_cmd = " ".join(all_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + run_subprocess(subprcs_cmd, shell=True, logger=self.log) + return final_transfers + + def _split_ffmpeg_args(self, in_args): + """Makes sure all entered arguments are separated in individual items. + + Split each argument string with " -" to identify if string contains + one or more arguments. 
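+
+        Example:
+            ["-y -hide_banner"] -> ["-y", "-hide_banner"]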
+ """ + splitted_args = [] + for arg in in_args: + sub_args = arg.split(" -") + if len(sub_args) == 1: + if arg and arg not in splitted_args: + splitted_args.append(arg) + continue + + for idx, arg in enumerate(sub_args): + if idx != 0: + arg = "-" + arg + + if arg and arg not in splitted_args: + splitted_args.append(arg) + return splitted_args diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py new file mode 100644 index 0000000000..c63c4a6a73 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_editorial_package.py @@ -0,0 +1,68 @@ +import os +import opentimelineio + +import pyblish.api +from ayon_core.pipeline import PublishValidationError + + +class ValidateEditorialPackage(pyblish.api.InstancePlugin): + """Checks that published folder contains all resources from otio + + Currently checks only by file names and expects flat structure. + It ignores path to resources in otio file as folder might be dragged in and + published from different location than it was created. + """ + + label = "Validate Editorial Package" + order = pyblish.api.ValidatorOrder - 0.49 + + hosts = ["traypublisher"] + families = ["editorial_pckg"] + + def process(self, instance): + editorial_pckg_data = instance.data.get("editorial_pckg") + if not editorial_pckg_data: + raise PublishValidationError("Editorial package not collected") + + folder_path = editorial_pckg_data["folder_path"] + + otio_path = editorial_pckg_data["otio_path"] + if not otio_path: + raise PublishValidationError( + f"Folder {folder_path} missing otio file") + + resource_paths = editorial_pckg_data["resource_paths"] + + resource_file_names = {os.path.basename(path) + for path in resource_paths} + + otio_data = opentimelineio.adapters.read_from_file(otio_path) + + target_urls = self._get_all_target_urls(otio_data) + missing_files = set() + for target_url in target_urls: + target_basename = os.path.basename(target_url) + if target_basename not in resource_file_names: + missing_files.add(target_basename) + + if missing_files: + raise PublishValidationError( + f"Otio file contains missing files `{missing_files}`.\n\n" + f"Please add them to `{folder_path}` and republish.") + + instance.data["editorial_pckg"]["otio_data"] = otio_data + + def _get_all_target_urls(self, otio_data): + target_urls = [] + + # Iterate through tracks, clips, or other elements + for track in otio_data.tracks: + for clip in track: + # Check if the clip has a media reference + if clip.media_reference is not None: + # Access the target_url from the media reference + target_url = clip.media_reference.target_url + if target_url: + target_urls.append(target_url) + + return target_urls diff --git a/client/ayon_core/hosts/unreal/lib.py b/client/ayon_core/hosts/unreal/lib.py index 37122b2096..185853a0aa 100644 --- a/client/ayon_core/hosts/unreal/lib.py +++ b/client/ayon_core/hosts/unreal/lib.py @@ -80,17 +80,21 @@ def get_engine_versions(env=None): def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path: """Get UE Editor executable path.""" ue_path = engine_path / "Engine/Binaries" + + ue_name = "UnrealEditor" + + # handle older versions of Unreal Engine + if engine_version.split(".")[0] == "4": + ue_name = "UE4Editor" + if platform.system().lower() == "windows": - if engine_version.split(".")[0] == "4": - ue_path /= "Win64/UE4Editor.exe" - elif engine_version.split(".")[0] == "5": - ue_path /= 
"Win64/UnrealEditor.exe" + ue_path /= f"Win64/{ue_name}.exe" elif platform.system().lower() == "linux": - ue_path /= "Linux/UE4Editor" + ue_path /= f"Linux/{ue_name}" elif platform.system().lower() == "darwin": - ue_path /= "Mac/UE4Editor" + ue_path /= f"Mac/{ue_name}" return ue_path diff --git a/client/ayon_core/hosts/unreal/ue_workers.py b/client/ayon_core/hosts/unreal/ue_workers.py index e3f8729c2e..256c0557be 100644 --- a/client/ayon_core/hosts/unreal/ue_workers.py +++ b/client/ayon_core/hosts/unreal/ue_workers.py @@ -260,11 +260,11 @@ class UEProjectGenerationWorker(UEWorker): self.failed.emit(msg, return_code) raise RuntimeError(msg) - # ensure we have PySide2 installed in engine + # ensure we have PySide2/6 installed in engine self.progress.emit(0) self.stage_begin.emit( - (f"Checking PySide2 installation... {stage_count} " + (f"Checking Qt bindings installation... {stage_count} " f" out of {stage_count}")) python_path = None if platform.system().lower() == "windows": @@ -287,11 +287,30 @@ class UEProjectGenerationWorker(UEWorker): msg = f"Unreal Python not found at {python_path}" self.failed.emit(msg, 1) raise RuntimeError(msg) - pyside_cmd = [python_path.as_posix(), - "-m", - "pip", - "install", - "pyside2"] + + pyside_version = "PySide2" + ue_version = self.ue_version.split(".") + if int(ue_version[0]) == 5 and int(ue_version[1]) >= 4: + # Use PySide6 6.6.3 because 6.7.0 had a bug + # - 'QPushButton' can't be added to 'QBoxLayout' + pyside_version = "PySide6==6.6.3" + + site_packages_prefix = python_path.parent.as_posix() + + pyside_cmd = [ + python_path.as_posix(), + "-m", "pip", + "install", + "--ignore-installed", + pyside_version, + + ] + + if platform.system().lower() == "windows": + pyside_cmd += ["--target", site_packages_prefix] + + print(f"--- Installing {pyside_version} ...") + print(" ".join(pyside_cmd)) pyside_install = subprocess.Popen(pyside_cmd, stdout=subprocess.PIPE, @@ -306,8 +325,8 @@ class UEProjectGenerationWorker(UEWorker): return_code = pyside_install.wait() if return_code and return_code != 0: - msg = ("Failed to create the project! " - "The installation of PySide2 has failed!") + msg = (f"Failed to create the project! 
{return_code} " + f"The installation of {pyside_version} has failed!: {pyside_install}") self.failed.emit(msg, return_code) raise RuntimeError(msg) diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py index 408262ca42..e25d3479ee 100644 --- a/client/ayon_core/lib/__init__.py +++ b/client/ayon_core/lib/__init__.py @@ -27,6 +27,10 @@ from .local_settings import ( get_openpype_username, ) from .ayon_connection import initialize_ayon_connection +from .cache import ( + CacheItem, + NestedCacheItem, +) from .events import ( emit_event, register_event_callback @@ -135,6 +139,7 @@ from .path_tools import ( ) from .ayon_info import ( + is_in_ayon_launcher_process, is_running_from_build, is_using_ayon_console, is_staging_enabled, @@ -157,6 +162,9 @@ __all__ = [ "initialize_ayon_connection", + "CacheItem", + "NestedCacheItem", + "emit_event", "register_event_callback", @@ -241,6 +249,7 @@ __all__ = [ "Logger", + "is_in_ayon_launcher_process", "is_running_from_build", "is_using_ayon_console", "is_staging_enabled", diff --git a/client/ayon_core/lib/ayon_info.py b/client/ayon_core/lib/ayon_info.py index fc09a7c90c..c4333fab95 100644 --- a/client/ayon_core/lib/ayon_info.py +++ b/client/ayon_core/lib/ayon_info.py @@ -1,4 +1,5 @@ import os +import sys import json import datetime import platform @@ -25,6 +26,18 @@ def get_ayon_launcher_version(): return content["__version__"] +def is_in_ayon_launcher_process(): + """Determine if current process is running from AYON launcher. + + Returns: + bool: True if running from AYON launcher. + + """ + ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"]) + executable_path = os.path.normpath(sys.executable) + return ayon_executable_path == executable_path + + def is_running_from_build(): """Determine if current process is running from build or code. diff --git a/client/ayon_core/lib/cache.py b/client/ayon_core/lib/cache.py new file mode 100644 index 0000000000..dc83520f76 --- /dev/null +++ b/client/ayon_core/lib/cache.py @@ -0,0 +1,250 @@ +import time +import collections + +InitInfo = collections.namedtuple( + "InitInfo", + ["default_factory", "lifetime"] +) + + +def _default_factory_func(): + return None + + +class CacheItem: + """Simple cache item with lifetime and default factory for default value. + + Default factory should return default value that is used on init + and on reset. + + Args: + default_factory (Optional[callable]): Function that returns default + value used on init and on reset. + lifetime (Optional[int]): Lifetime of the cache data in seconds. + Default lifetime is 120 seconds. + + """ + def __init__(self, default_factory=None, lifetime=None): + if lifetime is None: + lifetime = 120 + self._lifetime = lifetime + self._last_update = None + if default_factory is None: + default_factory = _default_factory_func + self._default_factory = default_factory + self._data = default_factory() + + @property + def is_valid(self): + """Is cache valid to use. + + Return: + bool: True if cache is valid, False otherwise. + + """ + if self._last_update is None: + return False + + return (time.time() - self._last_update) < self._lifetime + + def set_lifetime(self, lifetime): + """Change lifetime of cache item. + + Args: + lifetime (int): Lifetime of the cache data in seconds. 
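+
+        Note:
+            Already cached data keep their last update time, so shortening
+            the lifetime may immediately invalidate them.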
+ """ + + self._lifetime = lifetime + + def set_invalid(self): + """Set cache as invalid.""" + + self._last_update = None + + def reset(self): + """Set cache as invalid and reset data.""" + + self._last_update = None + self._data = self._default_factory() + + def get_data(self): + """Receive cached data. + + Returns: + Any: Any data that are cached. + + """ + return self._data + + def update_data(self, data): + """Update cache data. + + Args: + data (Any): Any data that are cached. + + """ + self._data = data + self._last_update = time.time() + + +class NestedCacheItem: + """Helper for cached items stored in nested structure. + + Example: + >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) + >>> cache["a"]["b"].is_valid + False + >>> cache["a"]["b"].get_data() + 0 + >>> cache["a"]["b"] = 1 + >>> cache["a"]["b"].is_valid + True + >>> cache["a"]["b"].get_data() + 1 + >>> cache.reset() + >>> cache["a"]["b"].is_valid + False + + Args: + levels (int): Number of nested levels where read cache is stored. + default_factory (Optional[callable]): Function that returns default + value used on init and on reset. + lifetime (Optional[int]): Lifetime of the cache data in seconds. + Default value is based on default value of 'CacheItem'. + _init_info (Optional[InitInfo]): Private argument. Init info for + nested cache where created from parent item. + + """ + def __init__( + self, levels=1, default_factory=None, lifetime=None, _init_info=None + ): + if levels < 1: + raise ValueError("Nested levels must be greater than 0") + self._data_by_key = {} + if _init_info is None: + _init_info = InitInfo(default_factory, lifetime) + self._init_info = _init_info + self._levels = levels + + def __getitem__(self, key): + """Get cached data. + + Args: + key (str): Key of the cache item. + + Returns: + Union[NestedCacheItem, CacheItem]: Cache item. + + """ + cache = self._data_by_key.get(key) + if cache is None: + if self._levels > 1: + cache = NestedCacheItem( + levels=self._levels - 1, + _init_info=self._init_info + ) + else: + cache = CacheItem( + self._init_info.default_factory, + self._init_info.lifetime + ) + self._data_by_key[key] = cache + return cache + + def __setitem__(self, key, value): + """Update cached data. + + Args: + key (str): Key of the cache item. + value (Any): Any data that are cached. + + """ + if self._levels > 1: + raise AttributeError(( + "{} does not support '__setitem__'. Lower nested level by {}" + ).format(self.__class__.__name__, self._levels - 1)) + cache = self[key] + cache.update_data(value) + + def get(self, key): + """Get cached data. + + Args: + key (str): Key of the cache item. + + Returns: + Union[NestedCacheItem, CacheItem]: Cache item. + + """ + return self[key] + + def cached_count(self): + """Amount of cached items. + + Returns: + int: Amount of cached items. + + """ + return len(self._data_by_key) + + def clear_key(self, key): + """Clear cached item by key. + + Args: + key (str): Key of the cache item. + + """ + self._data_by_key.pop(key, None) + + def clear_invalid(self): + """Clear all invalid cache items. + + Note: + To clear all cache items use 'reset'. 
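+
+        Returns:
+            dict: Cleared (invalid) data by key.
+
+        Example:
+            >>> cache = NestedCacheItem(levels=1, lifetime=0)
+            >>> cache["a"] = 1
+            >>> cache.clear_invalid()
+            {'a': 1}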
+ + """ + changed = {} + children_are_nested = self._levels > 1 + for key, cache in tuple(self._data_by_key.items()): + if children_are_nested: + output = cache.clear_invalid() + if output: + changed[key] = output + if not cache.cached_count(): + self._data_by_key.pop(key) + elif not cache.is_valid: + changed[key] = cache.get_data() + self._data_by_key.pop(key) + return changed + + def reset(self): + """Reset cache. + + Note: + To clear only invalid cache items use 'clear_invalid'. + + """ + self._data_by_key = {} + + def set_lifetime(self, lifetime): + """Change lifetime of all children cache items. + + Args: + lifetime (int): Lifetime of the cache data in seconds. + + """ + self._init_info.lifetime = lifetime + for cache in self._data_by_key.values(): + cache.set_lifetime(lifetime) + + @property + def is_valid(self): + """Raise reasonable error when called on wrong level. + + Raises: + AttributeError: If called on nested cache item. + + """ + raise AttributeError(( + "{} does not support 'is_valid'. Lower nested level by '{}'" + ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/modules/deadline/__init__.py b/client/ayon_core/modules/deadline/__init__.py index 5631e501d8..683d8dbe4a 100644 --- a/client/ayon_core/modules/deadline/__init__.py +++ b/client/ayon_core/modules/deadline/__init__.py @@ -1,6 +1,8 @@ from .deadline_module import DeadlineModule +from .version import __version__ __all__ = ( "DeadlineModule", + "__version__" ) diff --git a/client/ayon_core/modules/deadline/abstract_submit_deadline.py b/client/ayon_core/modules/deadline/abstract_submit_deadline.py index 2e0518ae20..564966b6a0 100644 --- a/client/ayon_core/modules/deadline/abstract_submit_deadline.py +++ b/client/ayon_core/modules/deadline/abstract_submit_deadline.py @@ -29,15 +29,11 @@ from ayon_core.pipeline.publish.lib import ( JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) -# TODO both 'requests_post' and 'requests_get' should not set 'verify' based -# on environment variable. This should be done in a more controlled way, -# e.g. each deadline url could have checkbox to enabled/disable -# ssl verification. def requests_post(*args, **kwargs): """Wrap request post method. - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline server is + Disabling SSL certificate validation if ``verify`` kwarg is set to False. + This is useful when Deadline server is running with self-signed certificates and its certificate is not added to trusted certificates on client machines. @@ -46,9 +42,9 @@ def requests_post(*args, **kwargs): of defense SSL is providing, and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", - True) else True # noqa + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # explicit cast to tuple # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.post(*args, **kwargs) @@ -57,8 +53,8 @@ def requests_post(*args, **kwargs): def requests_get(*args, **kwargs): """Wrap request get method. - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline server is + Disabling SSL certificate validation if ``verify`` kwarg is set to False. + This is useful when Deadline server is running with self-signed certificates and its certificate is not added to trusted certificates on client machines. 
@@ -67,9 +63,9 @@ def requests_get(*args, **kwargs): of defense SSL is providing, and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", - True) else True # noqa + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.get(*args, **kwargs) @@ -434,9 +430,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """Plugin entry point.""" self._instance = instance context = instance.context - self._deadline_url = context.data.get("defaultDeadline") - self._deadline_url = instance.data.get( - "deadlineUrl", self._deadline_url) + self._deadline_url = instance.data["deadline"]["url"] assert self._deadline_url, "Requires Deadline Webservice URL" @@ -460,7 +454,9 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, self.plugin_info = self.get_plugin_info() self.aux_files = self.get_aux_files() - job_id = self.process_submission() + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + job_id = self.process_submission(auth, verify) self.log.info("Submitted job to Deadline: {}.".format(job_id)) # TODO: Find a way that's more generic and not render type specific @@ -473,10 +469,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, job_info=render_job_info, plugin_info=render_plugin_info ) - render_job_id = self.submit(payload) + render_job_id = self.submit(payload, auth, verify) self.log.info("Render job id: %s", render_job_id) - def process_submission(self): + def process_submission(self, auth=None, verify=True): """Process data for submission. This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload @@ -487,7 +483,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ payload = self.assemble_payload() - return self.submit(payload) + return self.submit(payload, auth, verify) @abstractmethod def get_job_info(self): @@ -577,7 +573,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, "AuxFiles": aux_files or self.aux_files } - def submit(self, payload): + def submit(self, payload, auth, verify): """Submit payload to Deadline API end-point. This takes payload in the form of JSON file and POST it to @@ -585,6 +581,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, Args: payload (dict): dict to become json in deadline submission. + auth (tuple): (username, password) + verify (bool): verify SSL certificate if present Returns: str: resulting Deadline job id. 
@@ -594,7 +592,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ url = "{}/api/jobs".format(self._deadline_url) - response = requests_post(url, json=payload) + response = requests_post( + url, json=payload, auth=auth, verify=verify) if not response.ok: self.log.error("Submission failed!") self.log.error(response.status_code) diff --git a/client/ayon_core/modules/deadline/deadline_module.py b/client/ayon_core/modules/deadline/deadline_module.py index c0ba83477e..b1089bbfe2 100644 --- a/client/ayon_core/modules/deadline/deadline_module.py +++ b/client/ayon_core/modules/deadline/deadline_module.py @@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths): def initialize(self, studio_settings): # This module is always enabled - deadline_urls = {} + deadline_servers_info = {} enabled = self.name in studio_settings if enabled: deadline_settings = studio_settings[self.name] - deadline_urls = { - url_item["name"]: url_item["value"] + deadline_servers_info = { + url_item["name"]: url_item for url_item in deadline_settings["deadline_urls"] } - if enabled and not deadline_urls: + if enabled and not deadline_servers_info: enabled = False self.log.warning(( "Deadline Webservice URLs are not specified. Disabling addon." )) self.enabled = enabled - self.deadline_urls = deadline_urls + self.deadline_servers_info = deadline_servers_info def get_plugin_paths(self): """Deadline plugin paths.""" @@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths): } @staticmethod - def get_deadline_pools(webservice, log=None): + def get_deadline_pools(webservice, auth=None, log=None): """Get pools from Deadline. Args: webservice (str): Server url. - log (Logger) + auth (Optional[Tuple[str, str]]): Tuple containing username, + password + log (Optional[Logger]): Logger to log errors to, if provided. Returns: - list: Pools. + List[str]: Pools. Throws: RuntimeError: If deadline webservice is unreachable. @@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths): argument = "{}/api/pools?NamesOnly=true".format(webservice) try: - response = requests_get(argument) + kwargs = {} + if auth: + kwargs["auth"] = auth + response = requests_get(argument, **kwargs) except requests.exceptions.ConnectionError as exc: msg = 'Cannot connect to DL web service {}'.format(webservice) log.error(msg) diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index ea4b7a213e..22022831a0 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" # Run before collect_render. 
- order = pyblish.api.CollectorOrder + 0.005 + order = pyblish.api.CollectorOrder + 0.225 label = "Deadline Webservice from the Instance" - families = ["rendering", "renderlayer"] - hosts = ["maya"] + targets = ["local"] + families = ["render", + "rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender", + "usdrender", + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "vray_rop", + "publish.hou", + "image"] # for Fusion def process(self, instance): - instance.data["deadlineUrl"] = self._collect_deadline_url(instance) - instance.data["deadlineUrl"] = \ - instance.data["deadlineUrl"].strip().rstrip("/") + if not instance.data.get("farm"): + self.log.debug("Should not be processed on farm, skipping.") + return + + if not instance.data.get("deadline"): + instance.data["deadline"] = {} + + # todo: separate logic should be removed, all hosts should have same + host_name = instance.context.data["hostName"] + if host_name == "maya": + deadline_url = self._collect_deadline_url(instance) + else: + deadline_url = (instance.data.get("deadlineUrl") or # backwards + instance.data.get("deadline", {}).get("url")) + if deadline_url: + instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/") + else: + instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"] # noqa self.log.debug( - "Using {} for submission.".format(instance.data["deadlineUrl"])) + "Using {} for submission".format(instance.data["deadline"]["url"])) def _collect_deadline_url(self, render_instance): # type: (pyblish.api.Instance) -> str @@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): ["project_settings"] ["deadline"] ) - - default_server = render_instance.context.data["defaultDeadline"] + default_server_url = (render_instance.context.data["deadline"] + ["defaultUrl"]) # QUESTION How and where is this is set? Should be removed? instance_server = render_instance.data.get("deadlineServers") if not instance_server: self.log.debug("Using default server.") - return default_server + return default_server_url # Get instance server as sting. if isinstance(instance_server, int): @@ -66,7 +94,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): default_servers = { url_item["name"]: url_item["value"] - for url_item in deadline_settings["deadline_urls"] + for url_item in deadline_settings["deadline_servers_info"] } project_servers = ( render_instance.context.data diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py index b7ca227b01..9238e0ed95 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): """ # Run before collect_deadline_server_instance. 
- order = pyblish.api.CollectorOrder + 0.0025 + order = pyblish.api.CollectorOrder + 0.200 label = "Default Deadline Webservice" - - pass_mongo_url = False + targets = ["local"] def process(self, context): try: @@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): deadline_settings = context.data["project_settings"]["deadline"] deadline_server_name = deadline_settings["deadline_server"] - deadline_webservice = None + dl_server_info = None if deadline_server_name: - deadline_webservice = deadline_module.deadline_urls.get( + dl_server_info = deadline_module.deadline_servers_info.get( deadline_server_name) - default_deadline_webservice = deadline_module.deadline_urls["default"] - deadline_webservice = ( - deadline_webservice - or default_deadline_webservice - ) + if dl_server_info: + deadline_url = dl_server_info["value"] + else: + default_dl_server_info = deadline_module.deadline_servers_info[0] + deadline_url = default_dl_server_info["value"] - context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa + context.data["deadline"] = {} + context.data["deadline"]["defaultUrl"] = ( + deadline_url.strip().rstrip("/")) diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py new file mode 100644 index 0000000000..99d75ecb9e --- /dev/null +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +"""Collect user credentials + +Requires: + context -> project_settings + instance.data["deadline"]["url"] + +Provides: + instance.data["deadline"] -> require_authentication (bool) + instance.data["deadline"] -> auth (tuple (str, str)) - + (username, password) or None +""" +import pyblish.api + +from ayon_api import get_server_api_connection +from ayon_core.modules.deadline.deadline_module import DeadlineModule +from ayon_core.modules.deadline import __version__ + + +class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin): + """Collects user name and password for artist if DL requires authentication + """ + order = pyblish.api.CollectorOrder + 0.250 + label = "Collect Deadline User Credentials" + + targets = ["local"] + hosts = ["aftereffects", + "blender", + "fusion", + "harmony", + "nuke", + "maya", + "max", + "houdini"] + + families = ["render", + "rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender", + "usdrender", + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "vray_rop", + "publish.hou"] + + def process(self, instance): + if not instance.data.get("farm"): + self.log.debug("Should not be processed on farm, skipping.") + return + + collected_deadline_url = instance.data["deadline"]["url"] + if not collected_deadline_url: + raise ValueError("Instance doesn't have '[deadline][url]'.") + context_data = instance.context.data + deadline_settings = context_data["project_settings"]["deadline"] + + deadline_server_name = None + # deadline url might be set directly from instance, need to find + # metadata for it + for deadline_info in deadline_settings["deadline_urls"]: + dl_settings_url = deadline_info["value"].strip().rstrip("/") + if dl_settings_url == collected_deadline_url: + deadline_server_name = deadline_info["name"] + break + + if not deadline_server_name: + raise ValueError(f"Collected {collected_deadline_url} doesn't " + "match any site configured in Studio Settings") + + 
instance.data["deadline"]["require_authentication"] = ( + deadline_info["require_authentication"] + ) + instance.data["deadline"]["auth"] = None + + instance.data["deadline"]["verify"] = ( + not deadline_info["not_verify_ssl"]) + + if not deadline_info["require_authentication"]: + return + # TODO import 'get_addon_site_settings' when available + # in public 'ayon_api' + local_settings = get_server_api_connection().get_addon_site_settings( + DeadlineModule.name, __version__) + local_settings = local_settings["local_settings"] + for server_info in local_settings: + if deadline_server_name == server_info["server_name"]: + instance.data["deadline"]["auth"] = (server_info["username"], + server_info["password"]) diff --git a/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml new file mode 100644 index 0000000000..eec05df08a --- /dev/null +++ b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml @@ -0,0 +1,17 @@ + + + + Deadline Authentication + +## Deadline authentication is required + +This project has set in Settings that Deadline requires authentication. + +### How to repair? + +Please go to Ayon Server > Site Settings and provide your Deadline username and password. +In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator. + + + + \ No newline at end of file diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py index ab342c1a9d..311dbcedd5 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py @@ -174,7 +174,9 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, instance.data["toBeRenderedOn"] = "deadline" payload = self.assemble_payload() - return self.submit(payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + return self.submit(payload, auth=auth, verify=verify) def from_published_scene(self): """ diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py index 1fae23c9b2..a17bf0c3ef 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -2,9 +2,10 @@ import os import re import json import getpass -import requests import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post + class CelactionSubmitDeadline(pyblish.api.InstancePlugin): """Submit CelAction2D scene to Deadline @@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -196,8 +193,11 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): self.expected_files(instance, render_path) 
self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - - response = requests.post(self.deadline_url, json=payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(self.deadline_url, json=payload, + auth=auth, + verify=verify) if not response.ok: self.log.error( diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py index e3a4cd8030..6c70119628 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py @@ -2,17 +2,13 @@ import os import json import getpass -import requests - import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline.publish import ( AYONPyblishPluginMixin ) -from ayon_core.lib import ( - BoolDef, - NumberDef, -) +from ayon_core.lib import NumberDef class FusionSubmitDeadline( @@ -64,11 +60,6 @@ class FusionSubmitDeadline( decimals=0, minimum=1, maximum=10 - ), - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" ) ] @@ -80,10 +71,6 @@ class FusionSubmitDeadline( attribute_values = self.get_attr_values_from_data( instance.data) - # add suspend_publish attributeValue to instance data - instance.data["suspend_publish"] = attribute_values[ - "suspend_publish"] - context = instance.context key = "__hasRun{}".format(self.__class__.__name__) @@ -94,11 +81,7 @@ class FusionSubmitDeadline( from ayon_core.hosts.fusion.api.lib import get_frame_path - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" # Collect all saver instances in context that are to be rendered @@ -258,7 +241,9 @@ class FusionSubmitDeadline( # E.g. 
http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(deadline_url) - response = requests.post(url, json=payload) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(url, json=payload, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 6952604293..590abc3f12 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from ayon_core.lib import ( is_in_tests, - BoolDef, TextDef, NumberDef ) @@ -86,15 +85,10 @@ class HoudiniSubmitDeadline( priority = 50 chunk_size = 1 group = "" - + @classmethod def get_attribute_defs(cls): return [ - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" - ), NumberDef( "priority", label="Priority", @@ -194,7 +188,7 @@ class HoudiniSubmitDeadline( job_info.Pool = instance.data.get("primaryPool") job_info.SecondaryPool = instance.data.get("secondaryPool") - + if split_render_job and is_export_job: job_info.Priority = attribute_values.get( "export_priority", self.export_priority @@ -315,6 +309,11 @@ class HoudiniSubmitDeadline( return attr.asdict(plugin_info) def process(self, instance): + if not instance.data["farm"]: + self.log.debug("Render on farm is disabled. " + "Skipping deadline submission.") + return + super(HoudiniSubmitDeadline, self).process(instance) # TODO: Avoid the need for this logic here, needed for submit publish diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py index cba05f6948..ababb01285 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py @@ -181,17 +181,27 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, self.log.debug("Submitting 3dsMax render..") project_settings = instance.context.data["project_settings"] + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] if instance.data.get("multiCamera"): self.log.debug("Submitting jobs for multiple cameras..") payload = self._use_published_name_for_multiples( payload_data, project_settings) job_infos, plugin_infos = payload for job_info, plugin_info in zip(job_infos, plugin_infos): - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit( + self.assemble_payload(job_info, plugin_info), + auth=auth, + verify=verify + ) else: payload = self._use_published_name(payload_data, project_settings) job_info, plugin_info = payload - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit( + self.assemble_payload(job_info, plugin_info), + auth=auth, + verify=verify + ) def _use_published_name(self, data, project_settings): # Not all hosts can import these modules. 
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py index 0300b12104..f1bc1cb2be 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -292,7 +292,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, return plugin_payload - def process_submission(self): + def process_submission(self, auth=None, verify=True): from maya import cmds instance = self._instance @@ -332,7 +332,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, if "vrayscene" in instance.data["families"]: self.log.debug("Submitting V-Ray scene render..") vray_export_payload = self._get_vray_export_payload(payload_data) - export_job = self.submit(vray_export_payload) + + export_job = self.submit(vray_export_payload, + auth=auth, + verify=verify) payload = self._get_vray_render_payload(payload_data) @@ -351,7 +354,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, else: # Submit main render job job_info, plugin_info = payload - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit(self.assemble_payload(job_info, plugin_info), + auth=auth, + verify=verify) def _tile_render(self, payload): """Submit as tile render per frame with dependent assembly jobs.""" @@ -451,7 +456,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, # Submit frame tile jobs frame_tile_job_id = {} for frame, tile_job_payload in frame_payloads.items(): - job_id = self.submit(tile_job_payload) + job_id = self.submit(tile_job_payload, + instance.data["deadline"]["auth"]) frame_tile_job_id[frame] = job_id # Define assembly payloads @@ -554,12 +560,18 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, # Submit assembly jobs assembly_job_ids = [] num_assemblies = len(assembly_payloads) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] for i, payload in enumerate(assembly_payloads): self.log.debug( "submitting assembly job {} of {}".format(i + 1, num_assemblies) ) - assembly_job_id = self.submit(payload) + assembly_job_id = self.submit( + payload, + auth=auth, + verify=verify + ) assembly_job_ids.append(assembly_job_id) instance.data["assemblySubmissionJobs"] = assembly_job_ids diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py index d70cb75bf3..db35c2ae67 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -4,9 +4,9 @@ import json import getpass from datetime import datetime -import requests import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline.publish import ( AYONPyblishPluginMixin ) @@ -76,11 +76,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, default=cls.use_gpu, label="Use GPU" ), - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" - ), BoolDef( "workfile_dependency", default=cls.workfile_dependency, @@ -100,20 +95,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, instance.data["attributeValues"] = self.get_attr_values_from_data( instance.data) - # add suspend_publish attributeValue to instance data - 
instance.data["suspend_publish"] = instance.data["attributeValues"][ - "suspend_publish"] - families = instance.data["families"] node = instance.data["transientData"]["node"] context = instance.context - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -436,7 +423,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - response = requests.post(self.deadline_url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post(self.deadline_url, + json=payload, + timeout=10, + auth=auth, + verify=verify) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py index 4e4657d886..103f1355da 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py @@ -5,10 +5,10 @@ import json import re from copy import deepcopy -import requests import ayon_api import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline import publish from ayon_core.lib import EnumDef, is_in_tests from ayon_core.pipeline.version_start import get_versioning_start @@ -147,9 +147,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, instance_settings = self.get_attr_values_from_data(instance.data) initial_status = instance_settings.get("publishJobState", "Active") - # TODO: Remove this backwards compatibility of `suspend_publish` - if instance.data.get("suspend_publish"): - initial_status = "Suspended" args = [ "--headless", @@ -212,7 +209,10 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, self.log.debug("Submitting Deadline publish job ...") url = "{}/api/jobs".format(self.deadline_url) - response = requests.post(url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post( + url, json=payload, timeout=10, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) @@ -344,11 +344,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, deadline_publish_job_id = None if submission_type == "deadline": - # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") + self.deadline_url = instance.data["deadline"]["url"] assert self.deadline_url, "Requires Deadline Webservice URL" deadline_publish_job_id = \ @@ -356,7 +352,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, # Inject deadline url to instances. 
for inst in instances: - inst["deadlineUrl"] = self.deadline_url + if "deadline" not in inst: + inst["deadline"] = {} + inst["deadline"] = instance.data["deadline"] # publish job file publish_job = { diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py index 8def9cc63c..64313c5c4d 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py @@ -5,11 +5,11 @@ import json import re from copy import deepcopy -import requests import clique import ayon_api import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline import publish from ayon_core.lib import EnumDef, is_in_tests from ayon_core.pipeline.version_start import get_versioning_start @@ -88,9 +88,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, hosts = ["fusion", "max", "maya", "nuke", "houdini", "celaction", "aftereffects", "harmony", "blender"] - families = ["render.farm", "render.frames_farm", - "prerender.farm", "prerender.frames_farm", - "renderlayer", "imagesequence", + families = ["render", "render.farm", "render.frames_farm", + "prerender", "prerender.farm", "prerender.frames_farm", + "renderlayer", "imagesequence", "image", "vrayscene", "maxrender", "arnold_rop", "mantra_rop", "karma_rop", "vray_rop", @@ -224,9 +224,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, instance_settings = self.get_attr_values_from_data(instance.data) initial_status = instance_settings.get("publishJobState", "Active") - # TODO: Remove this backwards compatibility of `suspend_publish` - if instance.data.get("suspend_publish"): - initial_status = "Suspended" args = [ "--headless", @@ -306,7 +303,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, self.log.debug("Submitting Deadline publish job ...") url = "{}/api/jobs".format(self.deadline_url) - response = requests.post(url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + verify = instance.data["deadline"]["verify"] + response = requests_post( + url, json=payload, timeout=10, auth=auth, verify=verify) if not response.ok: raise Exception(response.text) @@ -314,7 +314,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, return deadline_publish_job_id - def process(self, instance): # type: (pyblish.api.Instance) -> None """Process plugin. @@ -461,18 +460,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, } # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") + self.deadline_url = instance.data["deadline"]["url"] assert self.deadline_url, "Requires Deadline Webservice URL" deadline_publish_job_id = \ self._submit_deadline_post_job(instance, render_job, instances) - # Inject deadline url to instances. 
+ # Inject deadline url to instances to query DL for job id for overrides for inst in instances: - inst["deadlineUrl"] = self.deadline_url + inst["deadline"] = instance.data["deadline"] # publish job file publish_job = { diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py index a7b300beff..8fffd47786 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py @@ -1,5 +1,7 @@ import pyblish.api +from ayon_core.pipeline import PublishXmlValidationError + from openpype_modules.deadline.abstract_submit_deadline import requests_get @@ -8,27 +10,42 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin): label = "Validate Deadline Web Service" order = pyblish.api.ValidatorOrder - hosts = ["maya", "nuke"] - families = ["renderlayer", "render"] + hosts = ["maya", "nuke", "aftereffects", "harmony", "fusion"] + families = ["renderlayer", "render", "render.farm"] # cache responses = {} def process(self, instance): - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") - self.log.debug( - "We have deadline URL on instance {}".format(deadline_url) - ) + if not instance.data.get("farm"): + self.log.debug("Should not be processed on farm, skipping.") + return + + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" + kwargs = {} + if instance.data["deadline"]["require_authentication"]: + auth = instance.data["deadline"]["auth"] + kwargs["auth"] = auth + + if not auth[0]: + raise PublishXmlValidationError( + self, + "Deadline requires authentication. " + "At least username is required to be set in " + "Site Settings.") + if deadline_url not in self.responses: - self.responses[deadline_url] = requests_get(deadline_url) + self.responses[deadline_url] = requests_get(deadline_url, **kwargs) response = self.responses[deadline_url] + if response.status_code == 401: + raise PublishXmlValidationError( + self, + "Deadline requires authentication. " + "Provided credentials are not working. 
" + "Please change them in Site Settings") assert response.ok, "Response must be ok" assert response.text.startswith("Deadline Web Service "), ( "Web service did not respond with 'Deadline Web Service'" diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py index 2feb044cf1..2fb511bf51 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -37,8 +37,9 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, self.log.debug("Skipping local instance.") return - deadline_url = self.get_deadline_url(instance) - pools = self.get_pools(deadline_url) + deadline_url = instance.data["deadline"]["url"] + pools = self.get_pools(deadline_url, + instance.data["deadline"].get("auth")) invalid_pools = {} primary_pool = instance.data.get("primaryPool") @@ -61,22 +62,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, formatting_data={"pools_str": ", ".join(pools)} ) - def get_deadline_url(self, instance): - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - if instance.data.get("deadlineUrl"): - # if custom one is set in instance, use that - deadline_url = instance.data.get("deadlineUrl") - return deadline_url - - def get_pools(self, deadline_url): + def get_pools(self, deadline_url, auth): if deadline_url not in self.pools_per_url: self.log.debug( "Querying available pools for Deadline url: {}".format( deadline_url) ) pools = DeadlineModule.get_deadline_pools(deadline_url, + auth=auth, log=self.log) + # some DL return "none" as a pool name + if "none" not in pools: + pools.append("none") self.log.info("Available pools: {}".format(pools)) self.pools_per_url[deadline_url] = pools diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index 6263526d5c..83e867408c 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -199,16 +199,16 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin): (dict): Job info from Deadline """ - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" url = "{}/api/jobs?JobID={}".format(deadline_url, job_id) try: - response = requests_get(url) + kwargs = {} + auth = instance.data["deadline"]["auth"] + if auth: + kwargs["auth"] = auth + response = requests_get(url, **kwargs) except requests.exceptions.ConnectionError: self.log.error("Deadline is not accessible at " "{}".format(deadline_url)) diff --git a/client/ayon_core/modules/deadline/version.py b/client/ayon_core/modules/deadline/version.py new file mode 100644 index 0000000000..569b1212f7 --- /dev/null +++ b/client/ayon_core/modules/deadline/version.py @@ -0,0 +1 @@ +__version__ = "0.1.10" diff --git a/client/ayon_core/modules/royalrender/api.py b/client/ayon_core/modules/royalrender/api.py index a69f88c43c..ef715811c5 100644 --- 
a/client/ayon_core/modules/royalrender/api.py +++ b/client/ayon_core/modules/royalrender/api.py @@ -7,7 +7,7 @@ from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths from .rr_job import SubmitFile -from .rr_job import RRjob, SubmitterParameter # noqa F401 +from .rr_job import RRJob, SubmitterParameter # noqa F401 class Api: diff --git a/client/ayon_core/pipeline/__init__.py b/client/ayon_core/pipeline/__init__.py index d1a181a353..8fd00ee6b6 100644 --- a/client/ayon_core/pipeline/__init__.py +++ b/client/ayon_core/pipeline/__init__.py @@ -97,6 +97,15 @@ from .context_tools import ( get_current_folder_path, get_current_task_name ) + +from .workfile import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + install = install_host uninstall = uninstall_host @@ -198,6 +207,13 @@ __all__ = ( "get_current_folder_path", "get_current_task_name", + # Workfile templates + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", + # Backwards compatible function names "install", "uninstall", diff --git a/client/ayon_core/pipeline/anatomy/anatomy.py b/client/ayon_core/pipeline/anatomy/anatomy.py index 2aa8eeddbc..98bbaa9bdc 100644 --- a/client/ayon_core/pipeline/anatomy/anatomy.py +++ b/client/ayon_core/pipeline/anatomy/anatomy.py @@ -3,11 +3,16 @@ import re import copy import platform import collections -import time import ayon_api -from ayon_core.lib import Logger, get_local_site_id, StringTemplate +from ayon_core.lib import ( + Logger, + get_local_site_id, + StringTemplate, + CacheItem, + NestedCacheItem, +) from ayon_core.addon import AddonsManager from .exceptions import RootCombinationError, ProjectNotSet @@ -397,62 +402,11 @@ class BaseAnatomy(object): ) -class CacheItem: - """Helper to cache data. - - Helper does not handle refresh of data and does not mark data as outdated. - Who uses the object should check of outdated state on his own will. - """ - - default_lifetime = 10 - - def __init__(self, lifetime=None): - self._data = None - self._cached = None - self._lifetime = lifetime or self.default_lifetime - - @property - def data(self): - """Cached data/object. - - Returns: - Any: Whatever was cached. - """ - - return self._data - - @property - def is_outdated(self): - """Item has outdated cache. - - Lifetime of cache item expired or was not yet set. - - Returns: - bool: Item is outdated. - """ - - if self._cached is None: - return True - return (time.time() - self._cached) > self._lifetime - - def update_data(self, data): - """Update cache of data. - - Args: - data (Any): Data to cache. 
- """ - - self._data = data - self._cached = time.time() - - class Anatomy(BaseAnatomy): - _sitesync_addon_cache = CacheItem() - _project_cache = collections.defaultdict(CacheItem) - _default_site_id_cache = collections.defaultdict(CacheItem) - _root_overrides_cache = collections.defaultdict( - lambda: collections.defaultdict(CacheItem) - ) + _project_cache = NestedCacheItem(lifetime=10) + _sitesync_addon_cache = CacheItem(lifetime=60) + _default_site_id_cache = NestedCacheItem(lifetime=60) + _root_overrides_cache = NestedCacheItem(2, lifetime=60) def __init__( self, project_name=None, site_name=None, project_entity=None @@ -477,18 +431,18 @@ class Anatomy(BaseAnatomy): @classmethod def get_project_entity_from_cache(cls, project_name): project_cache = cls._project_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data(ayon_api.get_project(project_name)) - return copy.deepcopy(project_cache.data) + return copy.deepcopy(project_cache.get_data()) @classmethod def get_sitesync_addon(cls): - if cls._sitesync_addon_cache.is_outdated: + if not cls._sitesync_addon_cache.is_valid: manager = AddonsManager() cls._sitesync_addon_cache.update_data( manager.get_enabled_addon("sitesync") ) - return cls._sitesync_addon_cache.data + return cls._sitesync_addon_cache.get_data() @classmethod def _get_studio_roots_overrides(cls, project_name): @@ -533,14 +487,14 @@ class Anatomy(BaseAnatomy): elif not site_name: # Use sync server to receive active site name project_cache = cls._default_site_id_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data( sitesync_addon.get_active_site_type(project_name) ) - site_name = project_cache.data + site_name = project_cache.get_data() site_cache = cls._root_overrides_cache[project_name][site_name] - if site_cache.is_outdated: + if not site_cache.is_valid: if site_name == "studio": # Handle studio root overrides without sync server # - studio root overrides can be done even without sync server @@ -553,4 +507,4 @@ class Anatomy(BaseAnatomy): project_name, site_name ) site_cache.update_data(roots_overrides) - return site_cache.data + return site_cache.get_data() diff --git a/client/ayon_core/pipeline/colorspace.py b/client/ayon_core/pipeline/colorspace.py index efa3bbf968..099616ff4a 100644 --- a/client/ayon_core/pipeline/colorspace.py +++ b/client/ayon_core/pipeline/colorspace.py @@ -8,16 +8,20 @@ import tempfile import warnings from copy import deepcopy +import ayon_api + from ayon_core import AYON_CORE_ROOT from ayon_core.settings import get_project_settings from ayon_core.lib import ( + filter_profiles, StringTemplate, run_ayon_launcher_process, - Logger + Logger, ) -from ayon_core.pipeline import Anatomy from ayon_core.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS - +from ayon_core.pipeline import Anatomy +from ayon_core.pipeline.template_data import get_template_data +from ayon_core.pipeline.load import get_representation_path_with_anatomy log = Logger.get_logger(__name__) @@ -32,10 +36,6 @@ class CachedData: } -class DeprecatedWarning(DeprecationWarning): - pass - - def deprecated(new_destination): """Mark functions as deprecated. 
@@ -60,13 +60,13 @@ def deprecated(new_destination): @functools.wraps(decorated_func) def wrapper(*args, **kwargs): - warnings.simplefilter("always", DeprecatedWarning) + warnings.simplefilter("always", DeprecationWarning) warnings.warn( ( "Call to deprecated function '{}'" "\nFunction was moved or removed.{}" ).format(decorated_func.__name__, warning_message), - category=DeprecatedWarning, + category=DeprecationWarning, stacklevel=4 ) return decorated_func(*args, **kwargs) @@ -81,28 +81,54 @@ def deprecated(new_destination): def _make_temp_json_file(): """Wrapping function for json temp file """ + temporary_json_file = None try: # Store dumped json to temporary file - temporary_json_file = tempfile.NamedTemporaryFile( + with tempfile.NamedTemporaryFile( mode="w", suffix=".json", delete=False - ) - temporary_json_file.close() - temporary_json_filepath = temporary_json_file.name.replace( - "\\", "/" - ) + ) as tmpfile: + temporary_json_filepath = tmpfile.name.replace("\\", "/") yield temporary_json_filepath - except IOError as _error: + except IOError as exc: raise IOError( - "Unable to create temp json file: {}".format( - _error - ) + "Unable to create temp json file: {}".format(exc) ) finally: # Remove the temporary json - os.remove(temporary_json_filepath) + if temporary_json_file is not None: + os.remove(temporary_json_filepath) + + +def has_compatible_ocio_package(): + """Current process has available compatible 'PyOpenColorIO'. + + Returns: + bool: True if compatible package is available. + + """ + if CachedData.has_compatible_ocio_package is not None: + return CachedData.has_compatible_ocio_package + + is_compatible = False + try: + import PyOpenColorIO + + # Check if PyOpenColorIO is compatible + # - version 2.0.0 or higher is required + # NOTE version 1 does not have '__version__' attribute + if hasattr(PyOpenColorIO, "__version__"): + version_parts = PyOpenColorIO.__version__.split(".") + major = int(version_parts[0]) + is_compatible = (major, ) >= (2, ) + except ImportError: + pass + + CachedData.has_compatible_ocio_package = is_compatible + # compatible + return CachedData.has_compatible_ocio_package def get_ocio_config_script_path(): @@ -110,53 +136,58 @@ def get_ocio_config_script_path(): Returns: str: path string + """ - return os.path.normpath( - os.path.join( - AYON_CORE_ROOT, - "scripts", - "ocio_wrapper.py" - ) + return os.path.join( + os.path.normpath(AYON_CORE_ROOT), + "scripts", + "ocio_wrapper.py" ) def get_colorspace_name_from_filepath( - filepath, host_name, project_name, - config_data=None, file_rules=None, + filepath, + host_name, + project_name, + config_data, + file_rules=None, project_settings=None, validate=True ): """Get colorspace name from filepath Args: - filepath (str): path string, file rule pattern is tested on it - host_name (str): host name - project_name (str): project name - config_data (Optional[dict]): config path and template in dict. - Defaults to None. - file_rules (Optional[dict]): file rule data from settings. - Defaults to None. - project_settings (Optional[dict]): project settings. Defaults to None. + filepath (str): Path string, file rule pattern is tested on it. + host_name (str): Host name. + project_name (str): Project name. + config_data (dict): Config path and template in dict. + file_rules (Optional[dict]): File rule data from settings. + project_settings (Optional[dict]): Project settings. validate (Optional[bool]): should resulting colorspace be validated - with config file? Defaults to True. + with config file? Defaults to True. 
Returns: - str: name of colorspace - """ - project_settings, config_data, file_rules = _get_context_settings( - host_name, project_name, - config_data=config_data, file_rules=file_rules, - project_settings=project_settings - ) + Union[str, None]: name of colorspace + """ if not config_data: # in case global or host color management is not enabled return None + if file_rules is None: + if project_settings is None: + project_settings = get_project_settings(project_name) + file_rules = get_imageio_file_rules( + project_name, host_name, project_settings + ) + # use ImageIO file rules colorspace_name = get_imageio_file_rules_colorspace_from_filepath( - filepath, host_name, project_name, - config_data=config_data, file_rules=file_rules, + filepath, + host_name, + project_name, + config_data=config_data, + file_rules=file_rules, project_settings=project_settings ) @@ -182,47 +213,18 @@ def get_colorspace_name_from_filepath( # validate matching colorspace with config if validate: validate_imageio_colorspace_in_config( - config_data["path"], colorspace_name) + config_data["path"], colorspace_name + ) return colorspace_name -# TODO: remove this in future - backward compatibility -@deprecated("get_imageio_file_rules_colorspace_from_filepath") -def get_imageio_colorspace_from_filepath(*args, **kwargs): - return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) - -# TODO: remove this in future - backward compatibility -@deprecated("get_imageio_file_rules_colorspace_from_filepath") -def get_colorspace_from_filepath(*args, **kwargs): - return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) - - -def _get_context_settings( - host_name, project_name, - config_data=None, file_rules=None, - project_settings=None -): - project_settings = project_settings or get_project_settings( - project_name - ) - - config_data = config_data or get_imageio_config( - project_name, host_name, project_settings) - - # in case host color management is not enabled - if not config_data: - return (None, None, None) - - file_rules = file_rules or get_imageio_file_rules( - project_name, host_name, project_settings) - - return project_settings, config_data, file_rules - - def get_imageio_file_rules_colorspace_from_filepath( - filepath, host_name, project_name, - config_data=None, file_rules=None, + filepath, + host_name, + project_name, + config_data, + file_rules=None, project_settings=None ): """Get colorspace name from filepath @@ -230,28 +232,28 @@ def get_imageio_file_rules_colorspace_from_filepath( ImageIO Settings file rules are tested for matching rule. Args: - filepath (str): path string, file rule pattern is tested on it - host_name (str): host name - project_name (str): project name - config_data (Optional[dict]): config path and template in dict. - Defaults to None. - file_rules (Optional[dict]): file rule data from settings. - Defaults to None. - project_settings (Optional[dict]): project settings. Defaults to None. + filepath (str): Path string, file rule pattern is tested on it. + host_name (str): Host name. + project_name (str): Project name. + config_data (dict): Config path and template in dict. + file_rules (Optional[dict]): File rule data from settings. + project_settings (Optional[dict]): Project settings. Returns: - str: name of colorspace - """ - project_settings, config_data, file_rules = _get_context_settings( - host_name, project_name, - config_data=config_data, file_rules=file_rules, - project_settings=project_settings - ) + Union[str, None]: Name of colorspace. 
+ """ if not config_data: # in case global or host color management is not enabled return None + if file_rules is None: + if project_settings is None: + project_settings = get_project_settings(project_name) + file_rules = get_imageio_file_rules( + project_name, host_name, project_settings + ) + # match file rule from path colorspace_name = None for file_rule in file_rules: @@ -282,26 +284,48 @@ def get_config_file_rules_colorspace_from_filepath(config_path, filepath): Returns: Union[str, None]: matching colorspace name + """ - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess + if has_compatible_ocio_package(): + result_data = _get_config_file_rules_colorspace_from_filepath( + config_path, filepath + ) + else: result_data = _get_wrapped_with_subprocess( - "colorspace", "get_config_file_rules_colorspace_from_filepath", + "get_config_file_rules_colorspace_from_filepath", config_path=config_path, filepath=filepath ) - if result_data: - return result_data[0] - - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_config_file_rules_colorspace_from_filepath # noqa: E501 - - result_data = _get_config_file_rules_colorspace_from_filepath( - config_path, filepath) if result_data: return result_data[0] + return None + + +def get_config_version_data(config_path): + """Return major and minor version info. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: minor and major keys with values + + """ + if config_path not in CachedData.config_version_data: + if has_compatible_ocio_package(): + version_data = _get_config_version_data(config_path) + else: + version_data = _get_wrapped_with_subprocess( + "get_config_version_data", + config_path=config_path + ) + CachedData.config_version_data[config_path] = version_data + + return deepcopy(CachedData.config_version_data[config_path]) def parse_colorspace_from_filepath( @@ -344,10 +368,10 @@ def parse_colorspace_from_filepath( pattern = "|".join( # Allow to match spaces also as underscores because the # integrator replaces spaces with underscores in filenames - re.escape(colorspace) for colorspace in + re.escape(colorspace) # Sort by longest first so the regex matches longer matches # over smaller matches, e.g. matching 'Output - sRGB' over 'sRGB' - sorted(colorspaces, key=len, reverse=True) + for colorspace in sorted(colorspaces, key=len, reverse=True) ) return re.compile(pattern) @@ -395,6 +419,7 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name): Returns: bool: True if exists + """ colorspaces = get_ocio_config_colorspaces(config_path)["colorspaces"] if colorspace_name not in colorspaces: @@ -405,28 +430,10 @@ def validate_imageio_colorspace_in_config(config_path, colorspace_name): return True -# TODO: remove this in future - backward compatibility -@deprecated("_get_wrapped_with_subprocess") -def get_data_subprocess(config_path, data_type): - """[Deprecated] Get data via subprocess - - Wrapper for Python 2 hosts. +def _get_wrapped_with_subprocess(command, **kwargs): + """Get data via subprocess. Args: - config_path (str): path leading to config.ocio file - """ - return _get_wrapped_with_subprocess( - "config", data_type, in_path=config_path, - ) - - -def _get_wrapped_with_subprocess(command_group, command, **kwargs): - """Get data via subprocess - - Wrapper for Python 2 hosts. 
- - Args: - command_group (str): command group name command (str): command name **kwargs: command arguments @@ -436,14 +443,15 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs): with _make_temp_json_file() as tmp_json_path: # Prepare subprocess arguments args = [ - "run", get_ocio_config_script_path(), - command_group, command + "run", + get_ocio_config_script_path(), + command ] - for key_, value_ in kwargs.items(): - args.extend(("--{}".format(key_), value_)) + for key, value in kwargs.items(): + args.extend(("--{}".format(key), value)) - args.append("--out_path") + args.append("--output_path") args.append(tmp_json_path) log.info("Executing: {}".format(" ".join(args))) @@ -451,55 +459,23 @@ def _get_wrapped_with_subprocess(command_group, command, **kwargs): run_ayon_launcher_process(*args, logger=log) # return all colorspaces - with open(tmp_json_path, "r") as f_: - return json.load(f_) + with open(tmp_json_path, "r") as stream: + return json.load(stream) -# TODO: this should be part of ocio_wrapper.py -def compatibility_check(): - """Making sure PyOpenColorIO is importable""" - if CachedData.has_compatible_ocio_package is not None: - return CachedData.has_compatible_ocio_package - - try: - import PyOpenColorIO # noqa: F401 - CachedData.has_compatible_ocio_package = True - except ImportError: - CachedData.has_compatible_ocio_package = False - - # compatible - return CachedData.has_compatible_ocio_package - - -# TODO: this should be part of ocio_wrapper.py def compatibility_check_config_version(config_path, major=1, minor=None): """Making sure PyOpenColorIO config version is compatible""" - if not CachedData.config_version_data.get(config_path): - if compatibility_check(): - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_version_data - - CachedData.config_version_data[config_path] = \ - _get_version_data(config_path) - - else: - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - CachedData.config_version_data[config_path] = \ - _get_wrapped_with_subprocess( - "config", "get_version", config_path=config_path - ) + version_data = get_config_version_data(config_path) # check major version - if CachedData.config_version_data[config_path]["major"] != major: + if version_data["major"] != major: return False # check minor version - if minor and CachedData.config_version_data[config_path]["minor"] != minor: + if minor is not None and version_data["minor"] != minor: return False - # compatible return True @@ -514,23 +490,19 @@ def get_ocio_config_colorspaces(config_path): Returns: dict: colorspace and family in couple + """ - if not CachedData.ocio_config_colorspaces.get(config_path): - if not compatibility_check(): - # python environment is not compatible with PyOpenColorIO - # needs to be run in subprocess - CachedData.ocio_config_colorspaces[config_path] = \ - _get_wrapped_with_subprocess( - "config", "get_colorspace", in_path=config_path - ) + if config_path not in CachedData.ocio_config_colorspaces: + if has_compatible_ocio_package(): + config_colorspaces = _get_ocio_config_colorspaces(config_path) else: - # TODO: refactor this so it is not imported but part of this file - from ayon_core.scripts.ocio_wrapper import _get_colorspace_data + config_colorspaces = _get_wrapped_with_subprocess( + "get_ocio_config_colorspaces", + config_path=config_path + ) + CachedData.ocio_config_colorspaces[config_path] = config_colorspaces - 
CachedData.ocio_config_colorspaces[config_path] = \ - _get_colorspace_data(config_path) - - return CachedData.ocio_config_colorspaces[config_path] + return deepcopy(CachedData.ocio_config_colorspaces[config_path]) def convert_colorspace_enumerator_item( @@ -540,11 +512,12 @@ def convert_colorspace_enumerator_item( """Convert colorspace enumerator item to dictionary Args: - colorspace_item (str): colorspace and family in couple - config_items (dict[str,dict]): colorspace data + colorspace_enum_item (str): Colorspace and family in couple. + config_items (dict[str,dict]): Colorspace data. Returns: dict: colorspace data + """ if "::" not in colorspace_enum_item: return None @@ -603,16 +576,18 @@ def get_colorspaces_enumerator_items( Families can be used for building menu and submenus in gui. Args: - config_items (dict[str,dict]): colorspace data coming from - `get_ocio_config_colorspaces` function - include_aliases (bool): include aliases in result - include_looks (bool): include looks in result - include_roles (bool): include roles in result + config_items (dict[str,dict]): Colorspace data coming from + `get_ocio_config_colorspaces` function. + include_aliases (Optional[bool]): Include aliases in result. + include_looks (Optional[bool]): Include looks in result. + include_roles (Optional[bool]): Include roles in result. + include_display_views (Optional[bool]): Include display views + in result. Returns: - list[tuple[str,str]]: colorspace and family in couple + list[tuple[str, str]]: Colorspace and family in couples. + """ - labeled_colorspaces = [] aliases = set() colorspaces = set() looks = set() @@ -622,86 +597,86 @@ def get_colorspaces_enumerator_items( if items_type == "colorspaces": for color_name, color_data in colorspace_items.items(): if color_data.get("aliases"): - aliases.update([ + aliases.update({ ( "aliases::{}".format(alias_name), "[alias] {} ({})".format(alias_name, color_name) ) for alias_name in color_data["aliases"] - ]) + }) colorspaces.add(( "{}::{}".format(items_type, color_name), "[colorspace] {}".format(color_name) )) elif items_type == "looks": - looks.update([ + looks.update({ ( "{}::{}".format(items_type, name), "[look] {} ({})".format(name, role_data["process_space"]) ) for name, role_data in colorspace_items.items() - ]) + }) elif items_type == "displays_views": - display_views.update([ + display_views.update({ ( "{}::{}".format(items_type, name), "[view (display)] {}".format(name) ) for name, _ in colorspace_items.items() - ]) + }) elif items_type == "roles": - roles.update([ + roles.update({ ( "{}::{}".format(items_type, name), "[role] {} ({})".format(name, role_data["colorspace"]) ) for name, role_data in colorspace_items.items() - ]) + }) - if roles and include_roles: - roles = sorted(roles, key=lambda x: x[0]) - labeled_colorspaces.extend(roles) + def _sort_key_getter(item): + """Use colorspace for sorting. - # add colorspaces as second so it is not first in menu - colorspaces = sorted(colorspaces, key=lambda x: x[0]) - labeled_colorspaces.extend(colorspaces) + Args: + item (tuple[str, str]): Item with colorspace and label. - if aliases and include_aliases: - aliases = sorted(aliases, key=lambda x: x[0]) - labeled_colorspaces.extend(aliases) + Returns: + str: Colorspace. 
-    if looks and include_looks:
-        looks = sorted(looks, key=lambda x: x[0])
-        labeled_colorspaces.extend(looks)
+        """
+        return item[0]
 
-    if display_views and include_display_views:
-        display_views = sorted(display_views, key=lambda x: x[0])
-        labeled_colorspaces.extend(display_views)
+    labeled_colorspaces = []
+    if include_roles:
+        labeled_colorspaces.extend(
+            sorted(roles, key=_sort_key_getter)
+        )
+
+    # Add colorspaces after roles, so it is not first in menu
+    labeled_colorspaces.extend(
+        sorted(colorspaces, key=_sort_key_getter)
+    )
+
+    if include_aliases:
+        labeled_colorspaces.extend(
+            sorted(aliases, key=_sort_key_getter)
+        )
+
+    if include_looks:
+        labeled_colorspaces.extend(
+            sorted(looks, key=_sort_key_getter)
+        )
+
+    if include_display_views:
+        labeled_colorspaces.extend(
+            sorted(display_views, key=_sort_key_getter)
+        )
 
     return labeled_colorspaces
 
 
-# TODO: remove this in future - backward compatibility
-@deprecated("_get_wrapped_with_subprocess")
-def get_colorspace_data_subprocess(config_path):
-    """[Deprecated] Get colorspace data via subprocess
-
-    Wrapper for Python 2 hosts.
-
-    Args:
-        config_path (str): path leading to config.ocio file
-
-    Returns:
-        dict: colorspace and family in couple
-    """
-    return _get_wrapped_with_subprocess(
-        "config", "get_colorspace", in_path=config_path
-    )
-
-
 def get_ocio_config_views(config_path):
     """Get all viewer data
 
@@ -713,212 +688,346 @@ def get_ocio_config_views(config_path):
 
     Returns:
         dict: `display/viewer` and viewer data
+
     """
-    if not compatibility_check():
-        # python environment is not compatible with PyOpenColorIO
-        # needs to be run in subprocess
-        return _get_wrapped_with_subprocess(
-            "config", "get_views", in_path=config_path
-        )
+    if has_compatible_ocio_package():
+        return _get_ocio_config_views(config_path)
 
-    # TODO: refactor this so it is not imported but part of this file
-    from ayon_core.scripts.ocio_wrapper import _get_views_data
-
-    return _get_views_data(config_path)
-
-
-# TODO: remove this in future - backward compatibility
-@deprecated("_get_wrapped_with_subprocess")
-def get_views_data_subprocess(config_path):
-    """[Deprecated] Get viewers data via subprocess
-
-    Wrapper for Python 2 hosts.
-
-    Args:
-        config_path (str): path leading to config.ocio file
-
-    Returns:
-        dict: `display/viewer` and viewer data
-    """
     return _get_wrapped_with_subprocess(
-        "config", "get_views", in_path=config_path
+        "get_ocio_config_views",
+        config_path=config_path
     )
 
 
-def get_imageio_config(
+def _get_global_config_data(
     project_name,
     host_name,
-    project_settings=None,
-    anatomy_data=None,
+    anatomy,
+    template_data,
+    imageio_global,
+    folder_id,
+    log,
+):
+    """Get global config data.
+
+    Global config from core settings uses profiles that are based on
+    host name, task name and task type. The filtered profile can define 3
+    types of config sources:
+    1. AYON ocio addon configs.
+    2. Custom path to ocio config.
+    3. Path to 'ocioconfig' representation on product. Name of product can be
+        defined in settings. Product name can be regex but exact match is
+        always preferred.
+
+    None is returned when no profile is found, or when a config path could
+    not be resolved from the matched profile.
+
+    Args:
+        project_name (str): Project name.
+        host_name (str): Host name.
+        anatomy (Anatomy): Project anatomy object.
+        template_data (dict[str, Any]): Template data.
+        imageio_global (dict[str, Any]): Core imageio settings.
+        folder_id (Optional[str]): Folder id.
+        log (logging.Logger): Logger object.
+
+    Returns:
+        Union[dict[str, str], None]: Config data with path and template
+            or None.
+
+    """
+    task_name = task_type = None
+    task_data = template_data.get("task")
+    if task_data:
+        task_name = task_data["name"]
+        task_type = task_data["type"]
+
+    filter_values = {
+        "task_names": task_name,
+        "task_types": task_type,
+        "host_names": host_name,
+    }
+    profile = filter_profiles(
+        imageio_global["ocio_config_profiles"], filter_values
+    )
+    if profile is None:
+        log.info(f"No config profile matched filters {str(filter_values)}")
+        return None
+
+    profile_type = profile["type"]
+    if profile_type in ("builtin_path", "custom_path"):
+        template = profile[profile_type]
+        result = StringTemplate.format_strict_template(
+            template, template_data
+        )
+        normalized_path = str(result.normalized())
+        if not os.path.exists(normalized_path):
+            log.warning(f"Path was not found '{normalized_path}'.")
+            return None
+
+        return {
+            "path": normalized_path,
+            "template": template
+        }
+
+    # TODO decide if this is the right name for representation
+    repre_name = "ocioconfig"
+
+    folder_info = template_data.get("folder")
+    if not folder_info:
+        log.warning("Folder info is missing.")
+        return None
+    folder_path = folder_info["path"]
+
+    product_name = profile["product_name"]
+    if folder_id is None:
+        folder_entity = ayon_api.get_folder_by_path(
+            project_name, folder_path, fields={"id"}
+        )
+        if not folder_entity:
+            log.warning(f"Folder entity '{folder_path}' was not found.")
+            return None
+        folder_id = folder_entity["id"]
+
+    product_entities_by_name = {
+        product_entity["name"]: product_entity
+        for product_entity in ayon_api.get_products(
+            project_name,
+            folder_ids={folder_id},
+            product_name_regex=product_name,
+            fields={"id", "name"}
+        )
+    }
+    if not product_entities_by_name:
+        log.debug(
+            f"No product entities were found for folder '{folder_path}' with"
+            f" product name filter '{product_name}'."
+        )
+        return None
+
+    # Try to use exact match first, otherwise use first available product
+    product_entity = product_entities_by_name.get(product_name)
+    if product_entity is None:
+        product_entity = next(iter(product_entities_by_name.values()))
+
+    product_name = product_entity["name"]
+    # Find last product version
+    version_entity = ayon_api.get_last_version_by_product_id(
+        project_name,
+        product_id=product_entity["id"],
+        fields={"id"}
+    )
+    if not version_entity:
+        log.info(
+            f"Product '{product_name}' does not have any versions available."
+        )
+        return None
+
+    # Find 'ocioconfig' representation entity
+    repre_entity = ayon_api.get_representation_by_name(
+        project_name,
+        representation_name=repre_name,
+        version_id=version_entity["id"],
+    )
+    if not repre_entity:
+        log.debug(
+            f"Representation '{repre_name}'"
+            f" not found on product '{product_name}'."
+        )
+        return None
+
+    path = get_representation_path_with_anatomy(repre_entity, anatomy)
+    template = repre_entity["attrib"]["template"]
+    return {
+        "path": path,
+        "template": template,
+    }
+
+
+def get_imageio_config_preset(
+    project_name,
+    folder_path,
+    task_name,
+    host_name,
     anatomy=None,
-    env=None
+    project_settings=None,
+    template_data=None,
+    env=None,
+    folder_id=None,
+):
     """Returns config data from settings
 
-    Config path is formatted in `path` key
-    and original settings input is saved into `template` key.
+    Output contains 'path' key and 'template' key holds its template.
+
+    Template data can be prepared with 'get_template_data'.
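+
+    A minimal usage sketch (the project, folder and task names below are
+    hypothetical):
+
+        config = get_imageio_config_preset(
+            "my_project", "/shots/sh010", "compositing", "nuke"
+        )
+        if config:
+            print(config["path"], config["template"])
+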
Args: - project_name (str): project name - host_name (str): host name + project_name (str): Project name. + folder_path (str): Folder path. + task_name (str): Task name. + host_name (str): Host name. + anatomy (Optional[Anatomy]): Project anatomy object. project_settings (Optional[dict]): Project settings. - anatomy_data (Optional[dict]): anatomy formatting data. - anatomy (Optional[Anatomy]): Anatomy object. - env (Optional[dict]): Environment variables. + template_data (Optional[dict]): Template data used for + template formatting. + env (Optional[dict]): Environment variables. Environments are used + for template formatting too. Values from 'os.environ' are used + when not provided. + folder_id (Optional[str]): Folder id. Is used only when config path + is received from published representation. Is autofilled when + not provided. Returns: dict: config path data or empty dict + """ - project_settings = project_settings or get_project_settings(project_name) - anatomy = anatomy or Anatomy(project_name) - - if not anatomy_data: - from ayon_core.pipeline.context_tools import ( - get_current_context_template_data) - anatomy_data = get_current_context_template_data() - - formatting_data = deepcopy(anatomy_data) - - # Add project roots to anatomy data - formatting_data["root"] = anatomy.roots - formatting_data["platform"] = platform.system().lower() + if not project_settings: + project_settings = get_project_settings(project_name) # Get colorspace settings imageio_global, imageio_host = _get_imageio_settings( - project_settings, host_name) + project_settings, host_name + ) + # Global color management must be enabled to be able to use host settings + if not imageio_global["activate_global_color_management"]: + log.info("Colorspace management is disabled globally.") + return {} # Host 'ocio_config' is optional host_ocio_config = imageio_host.get("ocio_config") or {} - - # Global color management must be enabled to be able to use host settings - activate_color_management = imageio_global.get( - "activate_global_color_management") - # TODO: remove this in future - backward compatibility - # For already saved overrides from previous version look for 'enabled' - # on host settings. - if activate_color_management is None: - activate_color_management = host_ocio_config.get("enabled", False) - - if not activate_color_management: - # if global settings are disabled return empty dict because - # it is expected that no colorspace management is needed - log.info("Colorspace management is disabled globally.") - return {} + # TODO remove + # - backward compatibility when host settings had only 'enabled' flag + # the flag was split into 'activate_global_color_management' + # and 'override_global_config' + host_ocio_config_enabled = host_ocio_config.get("enabled", False) # Check if host settings group is having 'activate_host_color_management' # - if it does not have activation key then default it to True so it uses # global settings - # This is for backward compatibility. 
-    # TODO: in future rewrite this to be more explicit
     activate_host_color_management = imageio_host.get(
-        "activate_host_color_management")
-
-    # TODO: remove this in future - backward compatibility
+        "activate_host_color_management"
+    )
     if activate_host_color_management is None:
-        activate_host_color_management = host_ocio_config.get("enabled", False)
+        activate_host_color_management = host_ocio_config_enabled
 
     if not activate_host_color_management:
         # if host settings are disabled return empty dict because
         # it is expected that no colorspace management is needed
         log.info(
-            "Colorspace management for host '{}' is disabled.".format(
-                host_name)
+            f"Colorspace management for host '{host_name}' is disabled."
         )
         return {}
 
-    # get config path from either global or host settings
-    # depending on override flag
+    project_entity = None
+    if anatomy is None:
+        project_entity = ayon_api.get_project(project_name)
+        anatomy = Anatomy(project_name, project_entity=project_entity)
+
+    if env is None:
+        env = dict(os.environ.items())
+
+    if template_data:
+        template_data = deepcopy(template_data)
+    else:
+        if not project_entity:
+            project_entity = ayon_api.get_project(project_name)
+
+        folder_entity = task_entity = folder_id = None
+        if folder_path:
+            folder_entity = ayon_api.get_folder_by_path(
+                project_name, folder_path
+            )
+            folder_id = folder_entity["id"]
+
+        if folder_id and task_name:
+            task_entity = ayon_api.get_task_by_name(
+                project_name, folder_id, task_name
+            )
+        template_data = get_template_data(
+            project_entity,
+            folder_entity,
+            task_entity,
+            host_name,
+            project_settings,
+        )
+
+    # Add project roots to anatomy data
+    template_data["root"] = anatomy.roots
+    template_data["platform"] = platform.system().lower()
+
+    # Add environment variables to template data
+    template_data.update(env)
+
+    # Get config path from core or host settings
+    # - based on override flag in host settings
     # TODO: in future rewrite this to be more explicit
     override_global_config = host_ocio_config.get("override_global_config")
     if override_global_config is None:
-        # for already saved overrides from previous version
-        # TODO: remove this in future - backward compatibility
-        override_global_config = host_ocio_config.get("enabled")
+        override_global_config = host_ocio_config_enabled
 
-    if override_global_config:
-        config_data = _get_config_data(
-            host_ocio_config["filepath"], formatting_data, env
+    if not override_global_config:
+        config_data = _get_global_config_data(
+            project_name,
+            host_name,
+            anatomy,
+            template_data,
+            imageio_global,
+            folder_id,
+            log,
         )
     else:
-        # get config path from global
-        config_global = imageio_global["ocio_config"]
-        config_data = _get_config_data(
-            config_global["filepath"], formatting_data, env
+        config_data = _get_host_config_data(
+            host_ocio_config["filepath"], template_data
         )
 
     if not config_data:
         raise FileExistsError(
-            "No OCIO config found in settings. It is "
-            "either missing or there is typo in path inputs"
+            "No OCIO config found in settings. It is"
+            " either missing or there is a typo in path inputs"
        )
 
     return config_data
 
 
-def _get_config_data(path_list, anatomy_data, env=None):
+def _get_host_config_data(templates, template_data):
     """Return config data for first template with existing path.
 
-    If template is used in path inputs,
-    then it is formatted by anatomy data
-    and environment variables
+    Use template data to fill possible formatting in paths.
 
     Args:
-        path_list (list[str]): list of abs paths
-        anatomy_data (dict): formatting data
-        env (Optional[dict]): Environment variables.
+        templates (list[str]): List of config path templates.
+        template_data (dict): Template data used to format templates.
 
     Returns:
-        dict: config data
+        Union[dict, None]: Config data or 'None' if templates are empty
+            or no formatted path exists.
+
     """
-    formatting_data = deepcopy(anatomy_data)
-
-    environment_vars = env or dict(**os.environ)
-
-    # format the path for potential env vars
-    formatting_data.update(environment_vars)
-
-    # first try host config paths
-    for path_ in path_list:
-        formatted_path = _format_path(path_, formatting_data)
-
-        if not os.path.exists(formatted_path):
+    for template in templates:
+        formatted_path = StringTemplate.format_template(
+            template, template_data
+        )
+        if not formatted_path.solved:
             continue
 
-        return {
-            "path": os.path.normpath(formatted_path),
-            "template": path_
-        }
-
-
-def _format_path(template_path, formatting_data):
-    """Single template path formatting.
-
-    Args:
-        template_path (str): template string
-        formatting_data (dict): data to be used for
-            template formatting
-
-    Returns:
-        str: absolute formatted path
-    """
-    # format path for anatomy keys
-    formatted_path = StringTemplate(template_path).format(
-        formatting_data)
-
-    return os.path.abspath(formatted_path)
+        path = os.path.abspath(formatted_path)
+        if os.path.exists(path):
+            return {
+                "path": os.path.normpath(path),
+                "template": template
+            }
 
 
 def get_imageio_file_rules(project_name, host_name, project_settings=None):
     """Get ImageIO File rules from project settings
 
     Args:
-        project_name (str): project name
-        host_name (str): host name
-        project_settings (dict, optional): project settings.
-            Defaults to None.
+        project_name (str): Project name.
+        host_name (str): Host name.
+        project_settings (Optional[dict]): Project settings.
 
     Returns:
         list[dict[str, Any]]: file rules data
+
     """
     project_settings = project_settings or get_project_settings(project_name)
 
@@ -960,7 +1069,7 @@ def get_remapped_colorspace_to_native(
     """Return native colorspace name.
 
     Args:
-        ocio_colorspace_name (str | None): ocio colorspace name
+        ocio_colorspace_name (str | None): OCIO colorspace name.
         host_name (str): Host name.
         imageio_host_settings (dict[str, Any]): ImageIO host settings.
 
@@ -968,16 +1077,15 @@
         Union[str, None]: native colorspace name defined in remapping or None
     """
-    CachedData.remapping.setdefault(host_name, {})
-    if CachedData.remapping[host_name].get("to_native") is None:
+    host_mapping = CachedData.remapping.setdefault(host_name, {})
+    if "to_native" not in host_mapping:
         remapping_rules = imageio_host_settings["remapping"]["rules"]
-        CachedData.remapping[host_name]["to_native"] = {
+        host_mapping["to_native"] = {
             rule["ocio_name"]: rule["host_native_name"]
             for rule in remapping_rules
         }
 
-    return CachedData.remapping[host_name]["to_native"].get(
-        ocio_colorspace_name)
+    return host_mapping["to_native"].get(ocio_colorspace_name)
 
 
 def get_remapped_colorspace_from_native(
@@ -992,30 +1100,29 @@
     Returns:
         Union[str, None]: Ocio colorspace name defined in remapping or None.
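+
+    Example (a sketch, the remapping rule values are hypothetical):
+
+        >>> get_remapped_colorspace_from_native(
+        ...     "sRGB (ACES)", "nuke", imageio_host_settings
+        ... )
+        'Output - sRGB'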
- """ - CachedData.remapping.setdefault(host_name, {}) - if CachedData.remapping[host_name].get("from_native") is None: + """ + host_mapping = CachedData.remapping.setdefault(host_name, {}) + if "from_native" not in host_mapping: remapping_rules = imageio_host_settings["remapping"]["rules"] - CachedData.remapping[host_name]["from_native"] = { + host_mapping["from_native"] = { rule["host_native_name"]: rule["ocio_name"] for rule in remapping_rules } - return CachedData.remapping[host_name]["from_native"].get( - host_native_colorspace_name) + return host_mapping["from_native"].get(host_native_colorspace_name) def _get_imageio_settings(project_settings, host_name): """Get ImageIO settings for global and host Args: - project_settings (dict): project settings. - Defaults to None. - host_name (str): host name + project_settings (dict[str, Any]): Project settings. + host_name (str): Host name. Returns: - tuple[dict, dict]: image io settings for global and host + tuple[dict, dict]: Image io settings for global and host. + """ # get image io from global and host_name imageio_global = project_settings["core"]["imageio"] @@ -1033,27 +1140,41 @@ def get_colorspace_settings_from_publish_context(context_data): Returns: tuple | bool: config, file rules or None + """ if "imageioSettings" in context_data and context_data["imageioSettings"]: return context_data["imageioSettings"] project_name = context_data["projectName"] + folder_path = context_data["folderPath"] + task_name = context_data["task"] host_name = context_data["hostName"] - anatomy_data = context_data["anatomyData"] - project_settings_ = context_data["project_settings"] + anatomy = context_data["anatomy"] + template_data = context_data["anatomyData"] + project_settings = context_data["project_settings"] + folder_id = None + folder_entity = context_data.get("folderEntity") + if folder_entity: + folder_id = folder_entity["id"] - config_data = get_imageio_config( - project_name, host_name, - project_settings=project_settings_, - anatomy_data=anatomy_data + config_data = get_imageio_config_preset( + project_name, + folder_path, + task_name, + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=template_data, + folder_id=folder_id, ) # caching invalid state, so it's not recalculated all the time file_rules = None if config_data: file_rules = get_imageio_file_rules( - project_name, host_name, - project_settings=project_settings_ + project_name, + host_name, + project_settings=project_settings ) # caching settings for future instance processing @@ -1063,18 +1184,13 @@ def get_colorspace_settings_from_publish_context(context_data): def set_colorspace_data_to_representation( - representation, context_data, + representation, + context_data, colorspace=None, log=None ): """Sets colorspace data to representation. - Args: - representation (dict): publishing representation - context_data (publish.Context.data): publishing context data - colorspace (str, optional): colorspace name. Defaults to None. - log (logging.Logger, optional): logger instance. Defaults to None. - Example: ``` { @@ -1089,6 +1205,12 @@ def set_colorspace_data_to_representation( } ``` + Args: + representation (dict): publishing representation + context_data (publish.Context.data): publishing context data + colorspace (Optional[str]): Colorspace name. + log (Optional[logging.Logger]): logger instance. 
+
+    """
     log = log or Logger.get_logger(__name__)
 
@@ -1122,12 +1244,15 @@ def set_colorspace_data_to_representation(
         filename = filename[0]
 
     # get matching colorspace from rules
-    colorspace = colorspace or get_imageio_colorspace_from_filepath(
-        filename, host_name, project_name,
-        config_data=config_data,
-        file_rules=file_rules,
-        project_settings=project_settings
-    )
+    if colorspace is None:
+        colorspace = get_imageio_file_rules_colorspace_from_filepath(
+            filename,
+            host_name,
+            project_name,
+            config_data=config_data,
+            file_rules=file_rules,
+            project_settings=project_settings
+        )
 
     # infuse data to representation
     if colorspace:
@@ -1149,47 +1274,330 @@ def get_display_view_colorspace_name(config_path, display, view):
         view (str): view name e.g. "sRGB"
 
     Returns:
-        view color space name (str) e.g. "Output - sRGB"
+        str: View color space name. e.g. "Output - sRGB"
+
     """
-
-    if not compatibility_check():
-        # python environment is not compatible with PyOpenColorIO
-        # needs to be run in subprocess
-        return get_display_view_colorspace_subprocess(config_path,
-                                                      display, view)
-
-    from ayon_core.scripts.ocio_wrapper import _get_display_view_colorspace_name  # noqa
-
-    return _get_display_view_colorspace_name(config_path, display, view)
+    if has_compatible_ocio_package():
+        return _get_display_view_colorspace_name(
+            config_path, display, view
+        )
+    return _get_wrapped_with_subprocess(
+        "get_display_view_colorspace_name",
+        config_path=config_path,
+        display=display,
+        view=view
+    )
 
 
-def get_display_view_colorspace_subprocess(config_path, display, view):
-    """Returns the colorspace attribute of the (display, view) pair
-    via subprocess.
+# --- Implementation of logic using 'PyOpenColorIO' ---
+def _get_ocio_config(config_path):
+    """Helper function to create OCIO config object.
+
+    Args:
+        config_path (str): Path to config.
+
+    Returns:
+        PyOpenColorIO.Config: OCIO config for the config path.
+
+    """
+    import PyOpenColorIO
+
+    config_path = os.path.abspath(config_path)
+
+    if not os.path.isfile(config_path):
+        raise IOError("Input path should be `config.ocio` file")
+
+    return PyOpenColorIO.Config.CreateFromFile(config_path)
+
+
+def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
+    """Return colorspace name matched by config v2 file rules.
+
+    Args:
+        config_path (str): path string leading to config.ocio
+        filepath (str): path of the file to match against the file rules
+
+    Raises:
+        IOError: Input config does not exist.
+
+    Returns:
+        str: Matching colorspace name.
+
+    """
+    config = _get_ocio_config(config_path)
+
+    # TODO: use `parseColorSpaceFromString` instead if config is OCIO v1
+    return config.getColorSpaceFromFilepath(str(filepath))
+
+
+def _get_config_version_data(config_path):
+    """Return major and minor version info.
+
+    Args:
+        config_path (str): path string leading to config.ocio
+
+    Raises:
+        IOError: Input config does not exist.
+
+    Returns:
+        dict: minor and major keys with values
+
+    """
+    config = _get_ocio_config(config_path)
+
+    return {
+        "major": config.getMajorVersion(),
+        "minor": config.getMinorVersion()
+    }
+
+
+def _get_display_view_colorspace_name(config_path, display, view):
+    """Returns the colorspace attribute of the (display, view) pair.
 
     Args:
         config_path (str): path string leading to config.ocio
         display (str): display name e.g. "ACES"
         view (str): view name e.g. "sRGB"
 
+    Raises:
+        IOError: Input config does not exist.
+
     Returns:
-        view color space name (str) e.g. "Output - sRGB"
+        str: view color space name e.g. 
"Output - sRGB" + + """ + config = _get_ocio_config(config_path) + return config.getDisplayViewColorSpaceName(display, view) + + +def _get_ocio_config_colorspaces(config_path): + """Return all found colorspace data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available colorspaces + + """ + config = _get_ocio_config(config_path) + + colorspace_data = { + "roles": {}, + "colorspaces": { + color.getName(): { + "family": color.getFamily(), + "categories": list(color.getCategories()), + "aliases": list(color.getAliases()), + "equalitygroup": color.getEqualityGroup(), + } + for color in config.getColorSpaces() + }, + "displays_views": { + f"{view} ({display})": { + "display": display, + "view": view + + } + for display in config.getDisplays() + for view in config.getViews(display) + }, + "looks": {} + } + + # add looks + looks = config.getLooks() + if looks: + colorspace_data["looks"] = { + look.getName(): {"process_space": look.getProcessSpace()} + for look in looks + } + + # add roles + roles = config.getRoles() + if roles: + colorspace_data["roles"] = { + role: {"colorspace": colorspace} + for (role, colorspace) in roles + } + + return colorspace_data + + +def _get_ocio_config_views(config_path): + """Return all found viewer data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available viewers + + """ + config = _get_ocio_config(config_path) + + output = {} + for display in config.getDisplays(): + for view in config.getViews(display): + colorspace = config.getDisplayViewColorSpaceName(display, view) + # Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views # noqa + if colorspace == "": + colorspace = display + + output[f"{display}/{view}"] = { + "display": display, + "view": view, + "colorspace": colorspace + } + + return output + + +# --- Current context functions --- +def get_current_context_imageio_config_preset( + anatomy=None, + project_settings=None, + template_data=None, + env=None, +): + """Get ImageIO config preset for current context. + + Args: + anatomy (Optional[Anatomy]): Current project anatomy. + project_settings (Optional[dict[str, Any]]): Current project settings. + template_data (Optional[dict[str, Any]]): Prepared template data + for current context. + env (Optional[dict[str, str]]): Custom environment variable values. + + Returns: + dict: ImageIO config preset. + + """ + from .context_tools import get_current_context, get_current_host_name + + context = get_current_context() + host_name = get_current_host_name() + return get_imageio_config_preset( + context["project_name"], + context["folder_path"], + context["task_name"], + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=template_data, + env=env, + ) + + +# --- Deprecated functions --- +@deprecated("has_compatible_ocio_package") +def compatibility_check(): + """Making sure PyOpenColorIO is importable + + Deprecated: + Deprecated since '0.3.2'. Use `has_compatible_ocio_package` instead. 
""" - with _make_temp_json_file() as tmp_json_path: - # Prepare subprocess arguments - args = [ - "run", get_ocio_config_script_path(), - "config", "get_display_view_colorspace_name", - "--in_path", config_path, - "--out_path", tmp_json_path, - "--display", display, - "--view", view - ] - log.debug("Executing: {}".format(" ".join(args))) + return has_compatible_ocio_package() - run_ayon_launcher_process(*args, logger=log) - # return default view colorspace name - with open(tmp_json_path, "r") as f: - return json.load(f) +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_imageio_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + + +@deprecated("get_imageio_file_rules_colorspace_from_filepath") +def get_colorspace_from_filepath(*args, **kwargs): + return get_imageio_file_rules_colorspace_from_filepath(*args, **kwargs) + + +@deprecated("_get_wrapped_with_subprocess") +def get_colorspace_data_subprocess(config_path): + """[Deprecated] Get colorspace data via subprocess + + Deprecated: + Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + return _get_wrapped_with_subprocess( + "get_ocio_config_colorspaces", + config_path=config_path + ) + + +@deprecated("_get_wrapped_with_subprocess") +def get_views_data_subprocess(config_path): + """[Deprecated] Get viewers data via subprocess + + Deprecated: + Deprecated since OpenPype. Use `_get_wrapped_with_subprocess` instead. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + + """ + return _get_wrapped_with_subprocess( + "get_ocio_config_views", + config_path=config_path + ) + + +@deprecated("get_imageio_config_preset") +def get_imageio_config( + project_name, + host_name, + project_settings=None, + anatomy_data=None, + anatomy=None, + env=None +): + """Returns config data from settings + + Config path is formatted in `path` key + and original settings input is saved into `template` key. + + Deprecated: + Deprecated since '0.3.1' . Use `get_imageio_config_preset` instead. + + Args: + project_name (str): project name + host_name (str): host name + project_settings (Optional[dict]): Project settings. + anatomy_data (Optional[dict]): anatomy formatting data. + anatomy (Optional[Anatomy]): Anatomy object. + env (Optional[dict]): Environment variables. + + Returns: + dict: config path data or empty dict + + """ + if not anatomy_data: + from .context_tools import get_current_context_template_data + anatomy_data = get_current_context_template_data() + + task_name = anatomy_data.get("task", {}).get("name") + folder_path = anatomy_data.get("folder", {}).get("path") + return get_imageio_config_preset( + project_name, + folder_path, + task_name, + host_name, + anatomy=anatomy, + project_settings=project_settings, + template_data=anatomy_data, + env=env, + ) diff --git a/client/ayon_core/pipeline/context_tools.py b/client/ayon_core/pipeline/context_tools.py index 33567d7280..c32d04c44c 100644 --- a/client/ayon_core/pipeline/context_tools.py +++ b/client/ayon_core/pipeline/context_tools.py @@ -459,36 +459,6 @@ def is_representation_from_latest(representation): ) -def get_template_data_from_session(session=None, settings=None): - """Template data for template fill from session keys. - - Args: - session (Union[Dict[str, str], None]): The Session to use. 
If not - provided use the currently active global Session. - settings (Optional[Dict[str, Any]]): Prepared studio or project - settings. - - Returns: - Dict[str, Any]: All available data from session. - """ - - if session is not None: - project_name = session["AYON_PROJECT_NAME"] - folder_path = session["AYON_FOLDER_PATH"] - task_name = session["AYON_TASK_NAME"] - host_name = session["AYON_HOST_NAME"] - else: - context = get_current_context() - project_name = context["project_name"] - folder_path = context["folder_path"] - task_name = context["task_name"] - host_name = get_current_host_name() - - return get_template_data_with_names( - project_name, folder_path, task_name, host_name, settings - ) - - def get_current_context_template_data(settings=None): """Prepare template data for current context. diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py index b8618738fb..7615ce6aee 100644 --- a/client/ayon_core/pipeline/create/context.py +++ b/client/ayon_core/pipeline/create/context.py @@ -1987,12 +1987,12 @@ class CreateContext: "Folder '{}' was not found".format(folder_path) ) - task_name = None if task_entity is None: - task_name = self.get_current_task_name() - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) + current_task_name = self.get_current_task_name() + if current_task_name: + task_entity = ayon_api.get_task_by_name( + project_name, folder_entity["id"], current_task_name + ) if pre_create_data is None: pre_create_data = {} @@ -2018,7 +2018,7 @@ class CreateContext: instance_data = { "folderPath": folder_entity["path"], - "task": task_name, + "task": task_entity["name"] if task_entity else None, "productType": creator.product_type, "variant": variant } @@ -2053,7 +2053,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 add_traceback = True exc_info = sys.exc_info() self.log.warning( @@ -2163,7 +2163,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2197,7 +2197,7 @@ class CreateContext: try: convertor.find_instances() - except: + except: # noqa: E722 failed_info.append( prepare_failed_convertor_operation_info( convertor.identifier, sys.exc_info() @@ -2373,7 +2373,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2440,7 +2440,7 @@ class CreateContext: error_message.format(identifier, exc_info[1]) ) - except: + except: # noqa: E722 failed = True add_traceback = True exc_info = sys.exc_info() @@ -2546,7 +2546,7 @@ class CreateContext: try: self.run_convertor(convertor_identifier) - except: + except: # noqa: E722 failed_info.append( prepare_failed_convertor_operation_info( convertor_identifier, sys.exc_info() diff --git a/client/ayon_core/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py index eb6f8569d9..72deee185e 100644 --- a/client/ayon_core/pipeline/farm/pyblish_functions.py +++ b/client/ayon_core/pipeline/farm/pyblish_functions.py @@ -225,6 +225,7 @@ def create_skeleton_instance( instance_skeleton_data = { "productType": product_type, "productName": data["productName"], + "task": data["task"], "families": families, "folderPath": 
data["folderPath"],
         "frameStart": time_data.start,
diff --git a/client/ayon_core/pipeline/publish/abstract_collect_render.py b/client/ayon_core/pipeline/publish/abstract_collect_render.py
index c50dc16380..17cab876b6 100644
--- a/client/ayon_core/pipeline/publish/abstract_collect_render.py
+++ b/client/ayon_core/pipeline/publish/abstract_collect_render.py
@@ -80,6 +80,7 @@ class RenderInstance(object):
     anatomyData = attr.ib(default=None)
     outputDir = attr.ib(default=None)
     context = attr.ib(default=None)
+    deadline = attr.ib(default=None)
 
     # The source instance the data of this render instance should merge into
     source_instance = attr.ib(default=None, type=pyblish.api.Instance)
@@ -215,13 +216,12 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
             # add additional data
             data = self.add_additional_data(data)
 
-            render_instance_dict = attr.asdict(render_instance)
-
             # Merge into source instance if provided, otherwise create instance
-            instance = render_instance_dict.pop("source_instance", None)
+            instance = render_instance.source_instance
             if instance is None:
                 instance = context.create_instance(render_instance.name)
 
+            render_instance_dict = attr.asdict(render_instance)
             instance.data.update(render_instance_dict)
             instance.data.update(data)
diff --git a/client/ayon_core/pipeline/template_data.py b/client/ayon_core/pipeline/template_data.py
index 526c7d35c5..d5f06d6a59 100644
--- a/client/ayon_core/pipeline/template_data.py
+++ b/client/ayon_core/pipeline/template_data.py
@@ -73,8 +73,8 @@ def get_folder_template_data(folder_entity, project_name):
     - 'parent' - direct parent name, project name used if is under project
 
-    Required document fields:
-        Folder: 'path' -> Plan to require: 'folderType'
+    Required entity fields:
+        Folder: 'path', 'folderType'
 
     Args:
         folder_entity (Dict[str, Any]): Folder entity.
@@ -101,6 +101,8 @@
     return {
         "folder": {
             "name": folder_name,
+            "type": folder_entity["folderType"],
+            "path": path,
         },
         "asset": folder_name,
         "hierarchy": hierarchy,
diff --git a/client/ayon_core/pipeline/thumbnails.py b/client/ayon_core/pipeline/thumbnails.py
new file mode 100644
index 0000000000..dbb38615d8
--- /dev/null
+++ b/client/ayon_core/pipeline/thumbnails.py
@@ -0,0 +1,263 @@
+import os
+import time
+import collections
+
+import ayon_api
+
+from ayon_core.lib.local_settings import get_ayon_appdirs
+
+
+FileInfo = collections.namedtuple(
+    "FileInfo",
+    ("path", "size", "modification_time")
+)
+
+
+class ThumbnailsCache:
+    """Cache of thumbnails on local storage.
+
+    Thumbnails are cached in a predefined directory under appdirs. Each
+    project has its own subfolder with thumbnails -> that's because each
+    project has its own thumbnail id validation, and file names are thumbnail
+    ids with matching extension. Extensions are predefined (.png and .jpeg).
+
+    The cache has a cleanup mechanism which is triggered on initialization
+    by default.
+
+    The cleanup has 2 levels:
+    1. soft cleanup which removes all files that are older than 'days_alive'
+    2. max size cleanup which removes files until the thumbnails folder
+        contains less than 'max_filesize'
+        - this is time consuming so it's not triggered automatically
+
+    Args:
+        cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
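+
+    Example (a sketch, the project name and thumbnail id are hypothetical):
+
+        cache = ThumbnailsCache()
+        path = cache.get_thumbnail_filepath("my_project", "thumb_id")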
+    """
+
+    # Lifetime of thumbnails (in days)
+    # - default 3 days
+    days_alive = 3
+    # Max size of thumbnail directory (in bytes)
+    # - default 2 GiB
+    max_filesize = 2 * 1024 * 1024 * 1024
+
+    def __init__(self, cleanup=True):
+        self._thumbnails_dir = None
+        self._days_alive_secs = self.days_alive * 24 * 60 * 60
+        if cleanup:
+            self.cleanup()
+
+    def get_thumbnails_dir(self):
+        """Root directory where thumbnails are stored.
+
+        Returns:
+            str: Path to thumbnails root.
+        """
+
+        if self._thumbnails_dir is None:
+            self._thumbnails_dir = get_ayon_appdirs("thumbnails")
+        return self._thumbnails_dir
+
+    thumbnails_dir = property(get_thumbnails_dir)
+
+    def get_thumbnails_dir_file_info(self):
+        """Get information about all files in thumbnails directory.
+
+        Returns:
+            List[FileInfo]: List of file information about all files.
+        """
+
+        thumbnails_dir = self.thumbnails_dir
+        files_info = []
+        if not os.path.exists(thumbnails_dir):
+            return files_info
+
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                files_info.append(FileInfo(
+                    path, os.path.getsize(path), os.path.getmtime(path)
+                ))
+        return files_info
+
+    def get_thumbnails_dir_size(self, files_info=None):
+        """Get full size of thumbnail directory.
+
+        Args:
+            files_info (List[FileInfo]): Prepared file information about
+                files in thumbnail directory.
+
+        Returns:
+            int: File size of all files in thumbnail directory.
+        """
+
+        if files_info is None:
+            files_info = self.get_thumbnails_dir_file_info()
+
+        if not files_info:
+            return 0
+
+        return sum(
+            file_info.size
+            for file_info in files_info
+        )
+
+    def cleanup(self, check_max_size=False):
+        """Cleanup thumbnails directory.
+
+        Args:
+            check_max_size (bool): Also cleanup files to match max size of
+                thumbnails directory.
+        """
+
+        thumbnails_dir = self.get_thumbnails_dir()
+        # Skip if thumbnails dir does not exist yet
+        if not os.path.exists(thumbnails_dir):
+            return
+
+        self._soft_cleanup(thumbnails_dir)
+        if check_max_size:
+            self._max_size_cleanup(thumbnails_dir)
+
+    def _soft_cleanup(self, thumbnails_dir):
+        current_time = time.time()
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                modification_time = os.path.getmtime(path)
+                if current_time - modification_time > self._days_alive_secs:
+                    os.remove(path)
+
+    def _max_size_cleanup(self, thumbnails_dir):
+        files_info = self.get_thumbnails_dir_file_info()
+        size = self.get_thumbnails_dir_size(files_info)
+        if size < self.max_filesize:
+            return
+
+        sorted_file_info = collections.deque(
+            sorted(files_info, key=lambda item: item.modification_time)
+        )
+        diff = size - self.max_filesize
+        while diff > 0:
+            if not sorted_file_info:
+                break
+
+            file_info = sorted_file_info.popleft()
+            diff -= file_info.size
+            os.remove(file_info.path)
+
+    def get_thumbnail_filepath(self, project_name, thumbnail_id):
+        """Get thumbnail by thumbnail id.
+
+        Args:
+            project_name (str): Name of project.
+            thumbnail_id (str): Thumbnail id.
+
+        Returns:
+            Union[str, None]: Path to thumbnail image or None if thumbnail
+                is not cached yet.
+        """
+
+        if not thumbnail_id:
+            return None
+
+        for ext in (
+            ".png",
+            ".jpeg",
+        ):
+            filepath = os.path.join(
+                self.thumbnails_dir, project_name, thumbnail_id + ext
+            )
+            if os.path.exists(filepath):
+                return filepath
+        return None
+
+    def get_project_dir(self, project_name):
+        """Path to root directory for specific project.
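+
+        A sketch of the output (the appdirs root is platform specific,
+        the project name is hypothetical):
+
+            cache.get_project_dir("my_project")
+            # -> "<thumbnails_dir>/my_project"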
+ + Args: + project_name (str): Name of project for which root directory path + should be returned. + + Returns: + str: Path to root of project's thumbnails. + """ + + return os.path.join(self.thumbnails_dir, project_name) + + def make_sure_project_dir_exists(self, project_name): + project_dir = self.get_project_dir(project_name) + if not os.path.exists(project_dir): + os.makedirs(project_dir) + return project_dir + + def store_thumbnail(self, project_name, thumbnail_id, content, mime_type): + """Store thumbnail to cache folder. + + Args: + project_name (str): Project where the thumbnail belong to. + thumbnail_id (str): Thumbnail id. + content (bytes): Byte content of thumbnail file. + mime_type (str): Type of content. + + Returns: + str: Path to cached thumbnail image file. + """ + + if mime_type == "image/png": + ext = ".png" + elif mime_type == "image/jpeg": + ext = ".jpeg" + else: + raise ValueError( + "Unknown mime type for thumbnail \"{}\"".format(mime_type)) + + project_dir = self.make_sure_project_dir_exists(project_name) + thumbnail_path = os.path.join(project_dir, thumbnail_id + ext) + with open(thumbnail_path, "wb") as stream: + stream.write(content) + + current_time = time.time() + os.utime(thumbnail_path, (current_time, current_time)) + + return thumbnail_path + + +class _CacheItems: + thumbnails_cache = ThumbnailsCache() + + +def get_thumbnail_path(project_name, thumbnail_id): + """Get path to thumbnail image. + + Args: + project_name (str): Project where thumbnail belongs to. + thumbnail_id (Union[str, None]): Thumbnail id. + + Returns: + Union[str, None]: Path to thumbnail image or None if thumbnail + id is not valid or thumbnail was not possible to receive. + + """ + if not thumbnail_id: + return None + + filepath = _CacheItems.thumbnails_cache.get_thumbnail_filepath( + project_name, thumbnail_id + ) + if filepath is not None: + return filepath + + # 'ayon_api' had a bug, public function + # 'get_thumbnail_by_id' did not return output of + # 'ServerAPI' method. 
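+    # Workaround: use the 'ServerAPI' connection directly and cache the
+    # received thumbnail content on disk.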
+ con = ayon_api.get_server_api_connection() + result = con.get_thumbnail_by_id(project_name, thumbnail_id) + + if result is not None and result.is_valid: + return _CacheItems.thumbnails_cache.store_thumbnail( + project_name, + thumbnail_id, + result.content, + result.content_type + ) + return None diff --git a/client/ayon_core/pipeline/workfile/__init__.py b/client/ayon_core/pipeline/workfile/__init__.py index 36766e3a04..05f939024c 100644 --- a/client/ayon_core/pipeline/workfile/__init__.py +++ b/client/ayon_core/pipeline/workfile/__init__.py @@ -21,6 +21,15 @@ from .utils import ( from .build_workfile import BuildWorkfile +from .workfile_template_builder import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + + __all__ = ( "get_workfile_template_key_from_context", "get_workfile_template_key", @@ -39,4 +48,10 @@ __all__ = ( "should_open_workfiles_tool_on_launch", "BuildWorkfile", + + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", ) diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py index 3447520b39..bb94d87483 100644 --- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py +++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py @@ -36,6 +36,7 @@ from ayon_core.lib import ( filter_profiles, attribute_definitions, ) +from ayon_core.lib.events import EventSystem, EventCallback, Event from ayon_core.lib.attribute_definitions import get_attributes_keys from ayon_core.pipeline import Anatomy from ayon_core.pipeline.load import ( @@ -43,6 +44,13 @@ from ayon_core.pipeline.load import ( get_representation_contexts, load_with_repre_context, ) +from ayon_core.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from ayon_core.pipeline.create import ( discover_legacy_creator_plugins, @@ -124,6 +132,8 @@ class AbstractTemplateBuilder(object): self._current_task_entity = _NOT_SET self._linked_folder_entities = _NOT_SET + self._event_system = EventSystem() + @property def project_name(self): if isinstance(self._host, HostBase): @@ -211,10 +221,14 @@ class AbstractTemplateBuilder(object): Returns: List[PlaceholderPlugin]: Plugin classes available for host. 
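+
+        A sketch of the new registration flow (the path is hypothetical):
+
+            from ayon_core.pipeline.workfile import (
+                register_workfile_build_plugin_path
+            )
+            register_workfile_build_plugin_path("<host>/plugins/workfile_build")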
""" + plugins = [] + # Backwards compatibility if hasattr(self._host, "get_workfile_build_placeholder_plugins"): return self._host.get_workfile_build_placeholder_plugins() - return [] + + plugins.extend(discover(PlaceholderPlugin)) + return plugins @property def host(self): @@ -257,6 +271,8 @@ class AbstractTemplateBuilder(object): self._project_settings = None + self._event_system = EventSystem() + self.clear_shared_data() self.clear_shared_populate_data() @@ -735,6 +751,16 @@ class AbstractTemplateBuilder(object): placeholder.set_finished() + # Trigger on_depth_processed event + self.emit_event( + topic="template.depth_processed", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id + }, + source="builder" + ) + # Clear shared data before getting new placeholders self.clear_shared_populate_data() @@ -753,6 +779,16 @@ class AbstractTemplateBuilder(object): placeholder_by_scene_id[identifier] = placeholder placeholders.append(placeholder) + # Trigger on_finished event + self.emit_event( + topic="template.finished", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id, + }, + source="builder" + ) + self.refresh() def _get_build_profiles(self): @@ -880,6 +916,30 @@ class AbstractTemplateBuilder(object): "create_first_version": create_first_version } + def emit_event(self, topic, data=None, source=None) -> Event: + return self._event_system.emit(topic, data, source) + + def add_event_callback(self, topic, callback, order=None): + return self._event_system.add_callback(topic, callback, order=order) + + def add_on_finished_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.finished", + callback=callback, + order=order + ) + + def add_on_depth_processed_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.depth_processed", + callback=callback, + order=order + ) + @six.add_metaclass(ABCMeta) class PlaceholderPlugin(object): @@ -1912,3 +1972,23 @@ class CreatePlaceholderItem(PlaceholderItem): def create_failed(self, creator_data): self._failed_created_publish_instances.append(creator_data) + + +def discover_workfile_build_plugins(*args, **kwargs): + return discover(PlaceholderPlugin, *args, **kwargs) + + +def register_workfile_build_plugin(plugin: PlaceholderPlugin): + register_plugin(PlaceholderPlugin, plugin) + + +def deregister_workfile_build_plugin(plugin: PlaceholderPlugin): + deregister_plugin(PlaceholderPlugin, plugin) + + +def register_workfile_build_plugin_path(path: str): + register_plugin_path(PlaceholderPlugin, path) + + +def deregister_workfile_build_plugin_path(path: str): + deregister_plugin_path(PlaceholderPlugin, path) diff --git a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py index f8cc81e718..ad5a5d43fc 100644 --- a/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py +++ b/client/ayon_core/plugins/publish/collect_anatomy_instance_data.py @@ -33,6 +33,7 @@ import collections import pyblish.api import ayon_api +from ayon_core.pipeline.template_data import get_folder_template_data from ayon_core.pipeline.version_start import get_versioning_start @@ -383,24 +384,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): # - 'folder', 'hierarchy', 'parent', 'folder' folder_entity = instance.data.get("folderEntity") if folder_entity: - folder_name = folder_entity["name"] - folder_path = 
folder_entity["path"] - hierarchy_parts = folder_path.split("/") - hierarchy_parts.pop(0) - hierarchy_parts.pop(-1) - parent_name = project_entity["name"] - if hierarchy_parts: - parent_name = hierarchy_parts[-1] - - hierarchy = "/".join(hierarchy_parts) - anatomy_data.update({ - "asset": folder_name, - "hierarchy": hierarchy, - "parent": parent_name, - "folder": { - "name": folder_name, - }, - }) + folder_data = get_folder_template_data( + folder_entity, + project_entity["name"] + ) + anatomy_data.update(folder_data) return if instance.data.get("newAssetPublishing"): @@ -418,6 +406,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "parent": parent_name, "folder": { "name": folder_name, + "path": instance.data["folderPath"], + # TODO get folder type from hierarchy + # Using 'Shot' is current default behavior of editorial + # (or 'newAssetPublishing') publishing. + "type": "Shot", }, }) diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py index 764168edd3..865b566e6e 100644 --- a/client/ayon_core/plugins/publish/integrate.py +++ b/client/ayon_core/plugins/publish/integrate.py @@ -42,7 +42,7 @@ def prepare_changes(old_entity, new_entity): Returns: dict[str, Any]: Changes that have new entity. - + """ changes = {} for key in set(new_entity.keys()): @@ -108,68 +108,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin): label = "Integrate Asset" order = pyblish.api.IntegratorOrder - families = ["workfile", - "pointcache", - "pointcloud", - "proxyAbc", - "camera", - "animation", - "model", - "maxScene", - "mayaAscii", - "mayaScene", - "setdress", - "layout", - "ass", - "vdbcache", - "scene", - "vrayproxy", - "vrayscene_layer", - "render", - "prerender", - "imagesequence", - "review", - "rendersetup", - "rig", - "plate", - "look", - "ociolook", - "audio", - "yetiRig", - "yeticache", - "nukenodes", - "gizmo", - "source", - "matchmove", - "image", - "assembly", - "fbx", - "gltf", - "textures", - "action", - "harmony.template", - "harmony.palette", - "editorial", - "background", - "camerarig", - "redshiftproxy", - "effect", - "xgen", - "hda", - "usd", - "staticMesh", - "skeletalMesh", - "mvLook", - "mvUsd", - "mvUsdComposition", - "mvUsdOverride", - "online", - "uasset", - "blendScene", - "yeticacheUE", - "tycache", - "csv_ingest_file", - ] default_template_name = "publish" @@ -359,7 +297,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # Compute the resource file infos once (files belonging to the # version instance instead of an individual representation) so - # we can re-use those file infos per representation + # we can reuse those file infos per representation resource_file_infos = self.get_files_info( resource_destinations, anatomy ) diff --git a/client/ayon_core/plugins/publish/validate_containers.py b/client/ayon_core/plugins/publish/validate_containers.py index bd21ec9693..520e7a7ce9 100644 --- a/client/ayon_core/plugins/publish/validate_containers.py +++ b/client/ayon_core/plugins/publish/validate_containers.py @@ -1,6 +1,11 @@ import pyblish.api + +from ayon_core.lib import filter_profiles +from ayon_core.host import ILoadHost from ayon_core.pipeline.load import any_outdated_containers from ayon_core.pipeline import ( + get_current_host_name, + registered_host, PublishXmlValidationError, OptionalPyblishPluginMixin ) @@ -18,17 +23,50 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(OptionalPyblishPluginMixin, - pyblish.api.ContextPlugin): - +class 
ValidateOutdatedContainers(
+    OptionalPyblishPluginMixin,
+    pyblish.api.ContextPlugin
+):
     """Containers must be updated to the latest version on publish."""
 
     label = "Validate Outdated Containers"
     order = pyblish.api.ValidatorOrder
-    hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"]
+
     optional = True
     actions = [ShowInventory]
 
+    @classmethod
+    def apply_settings(cls, settings):
+        # Disable plugin if host does not inherit from 'ILoadHost'
+        # - not a host that can load containers
+        host = registered_host()
+        if not isinstance(host, ILoadHost):
+            cls.enabled = False
+            return
+
+        # Disable if no profile is found for the current host
+        profiles = (
+            settings
+            ["core"]
+            ["publish"]
+            ["ValidateOutdatedContainers"]
+            ["plugin_state_profiles"]
+        )
+        profile = filter_profiles(
+            profiles, {"host_names": get_current_host_name()}
+        )
+        if not profile:
+            cls.enabled = False
+            return
+
+        # Apply settings from profile
+        for attr_name in {
+            "enabled",
+            "optional",
+            "active",
+        }:
+            setattr(cls, attr_name, profile[attr_name])
+
     def process(self, context):
         if not self.is_active(context.data):
             return
diff --git a/client/ayon_core/resources/app_icons/3de4.png b/client/ayon_core/resources/app_icons/3de4.png
new file mode 100644
index 0000000000..bd0fe40d37
Binary files /dev/null and b/client/ayon_core/resources/app_icons/3de4.png differ
diff --git a/client/ayon_core/scripts/ocio_wrapper.py b/client/ayon_core/scripts/ocio_wrapper.py
index 0a78e33c1f..0414fc59ce 100644
--- a/client/ayon_core/scripts/ocio_wrapper.py
+++ b/client/ayon_core/scripts/ocio_wrapper.py
@@ -1,28 +1,31 @@
 """OpenColorIO Wrapper.
 
-Only to be interpreted by Python 3. It is run in subprocess in case
-Python 2 hosts needs to use it. Or it is used as module for Python 3
-processing.
-
-Providing functionality:
-- get_colorspace - console command - python 2
-    - returning all available color spaces
-      found in input config path.
-- _get_colorspace_data - python 3 - module function
-    - returning all available colorspaces
-      found in input config path.
-- get_views - console command - python 2
-    - returning all available viewers
-      found in input config path.
-- _get_views_data - python 3 - module function
-    - returning all available viewers
-      found in input config path.
+Receive OpenColorIO information and store it in JSON format for processes
+that don't have access to OpenColorIO or whose version of OpenColorIO is
+not compatible.
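+
+Example of use (the paths are placeholders):
+
+    > python ./ocio_wrapper.py get_ocio_config_colorspaces \
+        --config_path <config.ocio> --output_path <output.json>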
""" -import click import json from pathlib import Path -import PyOpenColorIO as ocio + +import click + +from ayon_core.pipeline.colorspace import ( + has_compatible_ocio_package, + get_display_view_colorspace_name, + get_config_file_rules_colorspace_from_filepath, + get_config_version_data, + get_ocio_config_views, + get_ocio_config_colorspaces, +) + + +def _save_output_to_json_file(output, output_path): + json_path = Path(output_path) + with open(json_path, "w") as stream: + json.dump(output, stream) + + print(f"Data are saved to '{json_path}'") @click.group() @@ -30,404 +33,185 @@ def main(): pass # noqa: WPS100 -@main.group() -def config(): - """Config related commands group - - Example of use: - > pyton.exe ./ocio_wrapper.py config *args - """ - pass # noqa: WPS100 - - -@main.group() -def colorspace(): - """Colorspace related commands group - - Example of use: - > pyton.exe ./ocio_wrapper.py config *args - """ - pass # noqa: WPS100 - - -@config.command( - name="get_colorspace", - help=( - "return all colorspaces from config file " - "--path input arg is required" - ) -) -@click.option("--in_path", required=True, - help="path where to read ocio config file", - type=click.Path(exists=True)) -@click.option("--out_path", required=True, - help="path where to write output json file", - type=click.Path()) -def get_colorspace(in_path, out_path): +@main.command( + name="get_ocio_config_colorspaces", + help="return all colorspaces from config file") +@click.option( + "--config_path", + required=True, + help="OCIO config path to read ocio config file.", + type=click.Path(exists=True)) +@click.option( + "--output_path", + required=True, + help="path where to write output json file", + type=click.Path()) +def _get_ocio_config_colorspaces(config_path, output_path): """Aggregate all colorspace to file. - Python 2 wrapped console command - Args: - in_path (str): config file path string - out_path (str): temp json file path string + config_path (str): config file path string + output_path (str): temp json file path string Example of use: > pyton.exe ./ocio_wrapper.py config get_colorspace - --in_path= --out_path= + --config_path --output_path """ - json_path = Path(out_path) - - out_data = _get_colorspace_data(in_path) - - with open(json_path, "w") as f_: - json.dump(out_data, f_) - - print(f"Colorspace data are saved to '{json_path}'") - - -def _get_colorspace_data(config_path): - """Return all found colorspace data. - - Args: - config_path (str): path string leading to config.ocio - - Raises: - IOError: Input config does not exist. 
-
-    Returns:
-        dict: aggregated available colorspaces
-    """
-    config_path = Path(config_path)
-
-    if not config_path.is_file():
-        raise IOError(
-            f"Input path `{config_path}` should be `config.ocio` file")
-
-    config = ocio.Config().CreateFromFile(str(config_path))
-
-    colorspace_data = {
-        "roles": {},
-        "colorspaces": {
-            color.getName(): {
-                "family": color.getFamily(),
-                "categories": list(color.getCategories()),
-                "aliases": list(color.getAliases()),
-                "equalitygroup": color.getEqualityGroup(),
-            }
-            for color in config.getColorSpaces()
-        },
-        "displays_views": {
-            f"{view} ({display})": {
-                "display": display,
-                "view": view
-
-            }
-            for display in config.getDisplays()
-            for view in config.getViews(display)
-        },
-        "looks": {}
-    }
-
-    # add looks
-    looks = config.getLooks()
-    if looks:
-        colorspace_data["looks"] = {
-            look.getName(): {"process_space": look.getProcessSpace()}
-            for look in looks
-        }
-
-    # add roles
-    roles = config.getRoles()
-    if roles:
-        colorspace_data["roles"] = {
-            role: {"colorspace": colorspace}
-            for (role, colorspace) in roles
-        }
-
-    return colorspace_data
-
-
-@config.command(
-    name="get_views",
-    help=(
-        "return all viewers from config file "
-        "--path input arg is required"
+    _save_output_to_json_file(
+        get_ocio_config_colorspaces(config_path),
+        output_path
     )
-)
-@click.option("--in_path", required=True,
-              help="path where to read ocio config file",
-              type=click.Path(exists=True))
-@click.option("--out_path", required=True,
-              help="path where to write output json file",
-              type=click.Path())
-def get_views(in_path, out_path):
+
+
+@main.command(
+    name="get_ocio_config_views",
+    help="All viewers from config file")
+@click.option(
+    "--config_path",
+    required=True,
+    help="OCIO config path to read ocio config file.",
+    type=click.Path(exists=True))
+@click.option(
+    "--output_path",
+    required=True,
+    help="path where to write output json file",
+    type=click.Path())
+def _get_ocio_config_views(config_path, output_path):
     """Aggregate all viewers to file.
 
-    Python 2 wrapped console command
-
     Args:
-        in_path (str): config file path string
-        out_path (str): temp json file path string
+        config_path (str): config file path string
+        output_path (str): temp json file path string
 
     Example of use:
-    > pyton.exe ./ocio_wrapper.py config get_views \
-        --in_path= --out_path=
+    > python.exe ./ocio_wrapper.py get_ocio_config_views \
+        --config_path --output_path
     """
-    json_path = Path(out_path)
-
-    out_data = _get_views_data(in_path)
-
-    with open(json_path, "w") as f_:
-        json.dump(out_data, f_)
-
-    print(f"Viewer data are saved to '{json_path}'")
-
-
-def _get_views_data(config_path):
-    """Return all found viewer data.
-
-    Args:
-        config_path (str): path string leading to config.ocio
-
-    Raises:
-        IOError: Input config does not exist.
-
-    Returns:
-        dict: aggregated available viewers
-    """
-    config_path = Path(config_path)
-
-    if not config_path.is_file():
-        raise IOError("Input path should be `config.ocio` file")
-
-    config = ocio.Config().CreateFromFile(str(config_path))
-
-    data_ = {}
-    for display in config.getDisplays():
-        for view in config.getViews(display):
-            colorspace = config.getDisplayViewColorSpaceName(display, view)
-            # Special token.
See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views  # noqa
-            if colorspace == "":
-                colorspace = display
-
-            data_[f"{display}/{view}"] = {
-                "display": display,
-                "view": view,
-                "colorspace": colorspace
-            }
-
-    return data_
-
-
-@config.command(
-    name="get_version",
-    help=(
-        "return major and minor version from config file "
-        "--config_path input arg is required"
-        "--out_path input arg is required"
+    _save_output_to_json_file(
+        get_ocio_config_views(config_path),
+        output_path
     )
-)
-@click.option("--config_path", required=True,
-              help="path where to read ocio config file",
-              type=click.Path(exists=True))
-@click.option("--out_path", required=True,
-              help="path where to write output json file",
-              type=click.Path())
-def get_version(config_path, out_path):
-    """Get version of config.
 
-    Python 2 wrapped console command
+
+@main.command(
+    name="get_config_version_data",
+    help="Get major and minor version from config file")
+@click.option(
+    "--config_path",
+    required=True,
+    help="OCIO config path to read ocio config file.",
+    type=click.Path(exists=True))
+@click.option(
+    "--output_path",
+    required=True,
+    help="path where to write output json file",
+    type=click.Path())
+def _get_config_version_data(config_path, output_path):
+    """Get version of config.
 
     Args:
         config_path (str): ocio config file path string
-        out_path (str): temp json file path string
+        output_path (str): temp json file path string
 
     Example of use:
-    > pyton.exe ./ocio_wrapper.py config get_version \
-        --config_path= --out_path=
+    > python.exe ./ocio_wrapper.py get_config_version_data \
+        --config_path --output_path
     """
-    json_path = Path(out_path)
-
-    out_data = _get_version_data(config_path)
-
-    with open(json_path, "w") as f_:
-        json.dump(out_data, f_)
-
-    print(f"Config version data are saved to '{json_path}'")
-
-
-def _get_version_data(config_path):
-    """Return major and minor version info.
-
-    Args:
-        config_path (str): path string leading to config.ocio
-
-    Raises:
-        IOError: Input config does not exist.
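The version dump is mostly useful for branching between OCIO v1 and v2 behavior; for instance, config file rules (used by the next command) only exist in v2 configs, which the removed helper hints at with its `parseColorSpaceFromString` TODO. A tiny hedged sketch over the `{"major": ..., "minor": ...}` payload:

def supports_file_rules(version_data):
    # OCIO file rules were introduced with config version 2.
    return version_data["major"] >= 2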
-
-    Returns:
-        dict: minor and major keys with values
-    """
-    config_path = Path(config_path)
-
-    if not config_path.is_file():
-        raise IOError("Input path should be `config.ocio` file")
-
-    config = ocio.Config().CreateFromFile(str(config_path))
-
-    return {
-        "major": config.getMajorVersion(),
-        "minor": config.getMinorVersion()
-    }
-
-
-@colorspace.command(
-    name="get_config_file_rules_colorspace_from_filepath",
-    help=(
-        "return colorspace from filepath "
-        "--config_path - ocio config file path (input arg is required) "
-        "--filepath - any file path (input arg is required) "
-        "--out_path - temp json file path (input arg is required)"
+    _save_output_to_json_file(
+        get_config_version_data(config_path),
+        output_path
     )
-)
-@click.option("--config_path", required=True,
-              help="path where to read ocio config file",
-              type=click.Path(exists=True))
-@click.option("--filepath", required=True,
-              help="path to file to get colorspace from",
-              type=click.Path())
-@click.option("--out_path", required=True,
-              help="path where to write output json file",
-              type=click.Path())
-def get_config_file_rules_colorspace_from_filepath(
-    config_path, filepath, out_path
+
+
+@main.command(
+    name="get_config_file_rules_colorspace_from_filepath",
+    help="Colorspace file rules from filepath")
+@click.option(
+    "--config_path",
+    required=True,
+    help="OCIO config path to read ocio config file.",
+    type=click.Path(exists=True))
+@click.option(
+    "--filepath",
+    required=True,
+    help="Path to file to get colorspace from.",
+    type=click.Path())
+@click.option(
+    "--output_path",
+    required=True,
+    help="Path where to write output json file.",
+    type=click.Path())
+def _get_config_file_rules_colorspace_from_filepath(
+    config_path, filepath, output_path
 ):
     """Get colorspace from file path wrapper.
 
-    Python 2 wrapped console command
-
     Args:
         config_path (str): config file path string
         filepath (str): path string leading to file
-        out_path (str): temp json file path string
+        output_path (str): temp json file path string
 
     Example of use:
-    > pyton.exe ./ocio_wrapper.py \
-        colorspace get_config_file_rules_colorspace_from_filepath \
-        --config_path= --filepath= --out_path=
+    > python.exe ./ocio_wrapper.py \
+        get_config_file_rules_colorspace_from_filepath \
+        --config_path --filepath --output_path
     """
-    json_path = Path(out_path)
-
-    colorspace = _get_config_file_rules_colorspace_from_filepath(
-        config_path, filepath)
-
-    with open(json_path, "w") as f_:
-        json.dump(colorspace, f_)
-
-    print(f"Colorspace name is saved to '{json_path}'")
+    _save_output_to_json_file(
+        get_config_file_rules_colorspace_from_filepath(config_path, filepath),
+        output_path
+    )
 
 
-def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
-    """Return found colorspace data found in v2 file rules.
-
-    Args:
-        config_path (str): path string leading to config.ocio
-        filepath (str): path string leading to v2 file rules
-
-    Raises:
-        IOError: Input config does not exist.
-
-    Returns:
-        dict: aggregated available colorspaces
-    """
-    config_path = Path(config_path)
-
-    if not config_path.is_file():
-        raise IOError(
-            f"Input path `{config_path}` should be `config.ocio` file")
-
-    config = ocio.Config().CreateFromFile(str(config_path))
-
-    # TODO: use `parseColorSpaceFromString` instead if ocio v1
-    colorspace = config.getColorSpaceFromFilepath(str(filepath))
-
-    return colorspace
-
-
-def _get_display_view_colorspace_name(config_path, display, view):
-    """Returns the colorspace attribute of the (display, view) pair.
-
-    Args:
-        config_path (str): path string leading to config.ocio
-        display (str): display name e.g. "ACES"
-        view (str): view name e.g. "sRGB"
-
-
-    Raises:
-        IOError: Input config does not exist.
-
-    Returns:
-        view color space name (str) e.g. "Output - sRGB"
-    """
-
-    config_path = Path(config_path)
-
-    if not config_path.is_file():
-        raise IOError("Input path should be `config.ocio` file")
-
-    config = ocio.Config.CreateFromFile(str(config_path))
-    colorspace = config.getDisplayViewColorSpaceName(display, view)
-
-    return colorspace
-
-
-@config.command(
+@main.command(
     name="get_display_view_colorspace_name",
     help=(
-        "return default view colorspace name "
-        "for the given display and view "
-        "--path input arg is required"
-    )
-)
-@click.option("--in_path", required=True,
-              help="path where to read ocio config file",
-              type=click.Path(exists=True))
-@click.option("--out_path", required=True,
-              help="path where to write output json file",
-              type=click.Path())
-@click.option("--display", required=True,
-              help="display name",
-              type=click.STRING)
-@click.option("--view", required=True,
-              help="view name",
-              type=click.STRING)
-def get_display_view_colorspace_name(in_path, out_path,
-                                     display, view):
+        "Default view colorspace name for the given display and view"
+    ))
+@click.option(
+    "--config_path",
+    required=True,
+    help="path where to read ocio config file",
+    type=click.Path(exists=True))
+@click.option(
+    "--display",
+    required=True,
+    help="Display name",
+    type=click.STRING)
+@click.option(
+    "--view",
+    required=True,
+    help="view name",
+    type=click.STRING)
+@click.option(
+    "--output_path",
+    required=True,
+    help="path where to write output json file",
+    type=click.Path())
+def _get_display_view_colorspace_name(
+    config_path, display, view, output_path
+):
     """Aggregate view colorspace name to file.
 
     Wrapper command for processes without access to OpenColorIO
 
     Args:
-        in_path (str): config file path string
-        out_path (str): temp json file path string
+        config_path (str): config file path string
+        output_path (str): temp json file path string
         display (str): display name e.g. "ACES"
         view (str): view name e.g. "sRGB"
 
     Example of use:
-    > pyton.exe ./ocio_wrapper.py config \
-        get_display_view_colorspace_name --in_path= \
-        --out_path= --display= --view=
+    > python.exe ./ocio_wrapper.py \
+        get_display_view_colorspace_name --config_path \
+        --output_path --display --view
     """
+    _save_output_to_json_file(
+        get_display_view_colorspace_name(config_path, display, view),
+        output_path
+    )
 
-    out_data = _get_display_view_colorspace_name(in_path,
-                                                 display,
-                                                 view)
-    with open(out_path, "w") as f:
-        json.dump(out_data, f)
-
-    print(f"Display view colorspace saved to '{out_path}'")
-
-if __name__ == '__main__':
+if __name__ == "__main__":
+    if not has_compatible_ocio_package():
+        raise RuntimeError("OpenColorIO is not available.")
     main()
diff --git a/client/ayon_core/tools/adobe_webserver/app.py b/client/ayon_core/tools/adobe_webserver/app.py
index 7d97d7d66d..26bf638c91 100644
--- a/client/ayon_core/tools/adobe_webserver/app.py
+++ b/client/ayon_core/tools/adobe_webserver/app.py
@@ -104,14 +104,11 @@ class WebServerTool:
            again.
 
         In that case, use existing running webserver.
         Check here is easier than capturing exception from thread.
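The rewritten check below probes the port with `connect_ex` instead of binding it: a return value of 0 means something is already listening. The same probe in isolation, with a timeout added as a defensive extra that is not part of this patch:

import socket

def is_port_in_use(host_name, port):
    # connect_ex returns 0 on success and an errno otherwise,
    # so it doubles as a non-raising availability check.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1.0)
        return sock.connect_ex((host_name, port)) == 0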
""" - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - result = True - try: - sock.bind((host_name, port)) - result = False - except: - print("Port is in use") + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con: + result = con.connect_ex((host_name, port)) == 0 + if result: + print(f"Port {port} is already in use") return result def call(self, func): diff --git a/client/ayon_core/tools/common_models/cache.py b/client/ayon_core/tools/common_models/cache.py index 221a14160c..59b727728f 100644 --- a/client/ayon_core/tools/common_models/cache.py +++ b/client/ayon_core/tools/common_models/cache.py @@ -1,239 +1,31 @@ -import time -import collections +import warnings -InitInfo = collections.namedtuple( - "InitInfo", - ["default_factory", "lifetime"] +from ayon_core.lib import CacheItem as _CacheItem +from ayon_core.lib import NestedCacheItem as _NestedCacheItem + + +# Cache classes were moved to `ayon_core.lib.cache` +class CacheItem(_CacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'CacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +class NestedCacheItem(_NestedCacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'NestedCacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +__all__ = ( + "CacheItem", + "NestedCacheItem", ) - - -def _default_factory_func(): - return None - - -class CacheItem: - """Simple cache item with lifetime and default value. - - Args: - default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - """ - - def __init__(self, default_factory=None, lifetime=None): - if lifetime is None: - lifetime = 120 - self._lifetime = lifetime - self._last_update = None - if default_factory is None: - default_factory = _default_factory_func - self._default_factory = default_factory - self._data = default_factory() - - @property - def is_valid(self): - """Is cache valid to use. - - Return: - bool: True if cache is valid, False otherwise. - """ - - if self._last_update is None: - return False - - return (time.time() - self._last_update) < self._lifetime - - def set_lifetime(self, lifetime): - """Change lifetime of cache item. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._lifetime = lifetime - - def set_invalid(self): - """Set cache as invalid.""" - - self._last_update = None - - def reset(self): - """Set cache as invalid and reset data.""" - - self._last_update = None - self._data = self._default_factory() - - def get_data(self): - """Receive cached data. - - Returns: - Any: Any data that are cached. - """ - - return self._data - - def update_data(self, data): - self._data = data - self._last_update = time.time() - - -class NestedCacheItem: - """Helper for cached items stored in nested structure. - - Example: - >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) - >>> cache["a"]["b"].is_valid - False - >>> cache["a"]["b"].get_data() - 0 - >>> cache["a"]["b"] = 1 - >>> cache["a"]["b"].is_valid - True - >>> cache["a"]["b"].get_data() - 1 - >>> cache.reset() - >>> cache["a"]["b"].is_valid - False - - Args: - levels (int): Number of nested levels where read cache is stored. 
- default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - _init_info (Optional[InitInfo]): Private argument. Init info for - nested cache where created from parent item. - """ - - def __init__( - self, levels=1, default_factory=None, lifetime=None, _init_info=None - ): - if levels < 1: - raise ValueError("Nested levels must be greater than 0") - self._data_by_key = {} - if _init_info is None: - _init_info = InitInfo(default_factory, lifetime) - self._init_info = _init_info - self._levels = levels - - def __getitem__(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - cache = self._data_by_key.get(key) - if cache is None: - if self._levels > 1: - cache = NestedCacheItem( - levels=self._levels - 1, - _init_info=self._init_info - ) - else: - cache = CacheItem( - self._init_info.default_factory, - self._init_info.lifetime - ) - self._data_by_key[key] = cache - return cache - - def __setitem__(self, key, value): - """Update cached data. - - Args: - key (str): Key of the cache item. - value (Any): Any data that are cached. - """ - - if self._levels > 1: - raise AttributeError(( - "{} does not support '__setitem__'. Lower nested level by {}" - ).format(self.__class__.__name__, self._levels - 1)) - cache = self[key] - cache.update_data(value) - - def get(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - return self[key] - - def cached_count(self): - """Amount of cached items. - - Returns: - int: Amount of cached items. - """ - - return len(self._data_by_key) - - def clear_key(self, key): - """Clear cached item by key. - - Args: - key (str): Key of the cache item. - """ - - self._data_by_key.pop(key, None) - - def clear_invalid(self): - """Clear all invalid cache items. - - Note: - To clear all cache items use 'reset'. - """ - - changed = {} - children_are_nested = self._levels > 1 - for key, cache in tuple(self._data_by_key.items()): - if children_are_nested: - output = cache.clear_invalid() - if output: - changed[key] = output - if not cache.cached_count(): - self._data_by_key.pop(key) - elif not cache.is_valid: - changed[key] = cache.get_data() - self._data_by_key.pop(key) - return changed - - def reset(self): - """Reset cache. - - Note: - To clear only invalid cache items use 'clear_invalid'. - """ - - self._data_by_key = {} - - def set_lifetime(self, lifetime): - """Change lifetime of all children cache items. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._init_info.lifetime = lifetime - for cache in self._data_by_key.values(): - cache.set_lifetime(lifetime) - - @property - def is_valid(self): - """Raise reasonable error when called on wront level. - - Raises: - AttributeError: If called on nested cache item. - """ - - raise AttributeError(( - "{} does not support 'is_valid'. 
Lower nested level by '{}'" - ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/tools/common_models/hierarchy.py b/client/ayon_core/tools/common_models/hierarchy.py index d8b28f020d..78b8a7f492 100644 --- a/client/ayon_core/tools/common_models/hierarchy.py +++ b/client/ayon_core/tools/common_models/hierarchy.py @@ -6,8 +6,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import NestedCacheItem +from ayon_core.lib import NestedCacheItem HIERARCHY_MODEL_SENDER = "hierarchy.model" diff --git a/client/ayon_core/tools/common_models/projects.py b/client/ayon_core/tools/common_models/projects.py index e30561000e..19a38bee21 100644 --- a/client/ayon_core/tools/common_models/projects.py +++ b/client/ayon_core/tools/common_models/projects.py @@ -5,8 +5,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import CacheItem +from ayon_core.lib import CacheItem PROJECTS_MODEL_SENDER = "projects.model" diff --git a/client/ayon_core/tools/common_models/thumbnails.py b/client/ayon_core/tools/common_models/thumbnails.py index 1c3aadc49f..2fa1e36e5c 100644 --- a/client/ayon_core/tools/common_models/thumbnails.py +++ b/client/ayon_core/tools/common_models/thumbnails.py @@ -1,234 +1,15 @@ -import os -import time import collections import ayon_api -import appdirs -from .cache import NestedCacheItem - -FileInfo = collections.namedtuple( - "FileInfo", - ("path", "size", "modification_time") -) - - -class ThumbnailsCache: - """Cache of thumbnails on local storage. - - Thumbnails are cached to appdirs to predefined directory. Each project has - own subfolder with thumbnails -> that's because each project has own - thumbnail id validation and file names are thumbnail ids with matching - extension. Extensions are predefined (.png and .jpeg). - - Cache has cleanup mechanism which is triggered on initialized by default. - - The cleanup has 2 levels: - 1. soft cleanup which remove all files that are older then 'days_alive' - 2. max size cleanup which remove all files until the thumbnails folder - contains less then 'max_filesize' - - this is time consuming so it's not triggered automatically - - Args: - cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails). - """ - - # Lifetime of thumbnails (in seconds) - # - default 3 days - days_alive = 3 - # Max size of thumbnail directory (in bytes) - # - default 2 Gb - max_filesize = 2 * 1024 * 1024 * 1024 - - def __init__(self, cleanup=True): - self._thumbnails_dir = None - self._days_alive_secs = self.days_alive * 24 * 60 * 60 - if cleanup: - self.cleanup() - - def get_thumbnails_dir(self): - """Root directory where thumbnails are stored. - - Returns: - str: Path to thumbnails root. - """ - - if self._thumbnails_dir is None: - # TODO use generic function - directory = appdirs.user_data_dir("AYON", "Ynput") - self._thumbnails_dir = os.path.join(directory, "thumbnails") - return self._thumbnails_dir - - thumbnails_dir = property(get_thumbnails_dir) - - def get_thumbnails_dir_file_info(self): - """Get information about all files in thumbnails directory. - - Returns: - List[FileInfo]: List of file information about all files. 
- """ - - thumbnails_dir = self.thumbnails_dir - files_info = [] - if not os.path.exists(thumbnails_dir): - return files_info - - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - files_info.append(FileInfo( - path, os.path.getsize(path), os.path.getmtime(path) - )) - return files_info - - def get_thumbnails_dir_size(self, files_info=None): - """Got full size of thumbnail directory. - - Args: - files_info (List[FileInfo]): Prepared file information about - files in thumbnail directory. - - Returns: - int: File size of all files in thumbnail directory. - """ - - if files_info is None: - files_info = self.get_thumbnails_dir_file_info() - - if not files_info: - return 0 - - return sum( - file_info.size - for file_info in files_info - ) - - def cleanup(self, check_max_size=False): - """Cleanup thumbnails directory. - - Args: - check_max_size (bool): Also cleanup files to match max size of - thumbnails directory. - """ - - thumbnails_dir = self.get_thumbnails_dir() - # Skip if thumbnails dir does not exist yet - if not os.path.exists(thumbnails_dir): - return - - self._soft_cleanup(thumbnails_dir) - if check_max_size: - self._max_size_cleanup(thumbnails_dir) - - def _soft_cleanup(self, thumbnails_dir): - current_time = time.time() - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - modification_time = os.path.getmtime(path) - if current_time - modification_time > self._days_alive_secs: - os.remove(path) - - def _max_size_cleanup(self, thumbnails_dir): - files_info = self.get_thumbnails_dir_file_info() - size = self.get_thumbnails_dir_size(files_info) - if size < self.max_filesize: - return - - sorted_file_info = collections.deque( - sorted(files_info, key=lambda item: item.modification_time) - ) - diff = size - self.max_filesize - while diff > 0: - if not sorted_file_info: - break - - file_info = sorted_file_info.popleft() - diff -= file_info.size - os.remove(file_info.path) - - def get_thumbnail_filepath(self, project_name, thumbnail_id): - """Get thumbnail by thumbnail id. - - Args: - project_name (str): Name of project. - thumbnail_id (str): Thumbnail id. - - Returns: - Union[str, None]: Path to thumbnail image or None if thumbnail - is not cached yet. - """ - - if not thumbnail_id: - return None - - for ext in ( - ".png", - ".jpeg", - ): - filepath = os.path.join( - self.thumbnails_dir, project_name, thumbnail_id + ext - ) - if os.path.exists(filepath): - return filepath - return None - - def get_project_dir(self, project_name): - """Path to root directory for specific project. - - Args: - project_name (str): Name of project for which root directory path - should be returned. - - Returns: - str: Path to root of project's thumbnails. - """ - - return os.path.join(self.thumbnails_dir, project_name) - - def make_sure_project_dir_exists(self, project_name): - project_dir = self.get_project_dir(project_name) - if not os.path.exists(project_dir): - os.makedirs(project_dir) - return project_dir - - def store_thumbnail(self, project_name, thumbnail_id, content, mime_type): - """Store thumbnail to cache folder. - - Args: - project_name (str): Project where the thumbnail belong to. - thumbnail_id (str): Id of thumbnail. - content (bytes): Byte content of thumbnail file. - mime_data (str): Type of content. - - Returns: - str: Path to cached thumbnail image file. 
- """ - - if mime_type == "image/png": - ext = ".png" - elif mime_type == "image/jpeg": - ext = ".jpeg" - else: - raise ValueError( - "Unknown mime type for thumbnail \"{}\"".format(mime_type)) - - project_dir = self.make_sure_project_dir_exists(project_name) - thumbnail_path = os.path.join(project_dir, thumbnail_id + ext) - with open(thumbnail_path, "wb") as stream: - stream.write(content) - - current_time = time.time() - os.utime(thumbnail_path, (current_time, current_time)) - - return thumbnail_path +from ayon_core.lib import NestedCacheItem +from ayon_core.pipeline.thumbnails import get_thumbnail_path class ThumbnailsModel: entity_cache_lifetime = 240 # In seconds def __init__(self): - self._thumbnail_cache = ThumbnailsCache() self._paths_cache = collections.defaultdict(dict) self._folders_cache = NestedCacheItem( levels=2, lifetime=self.entity_cache_lifetime) @@ -283,28 +64,7 @@ class ThumbnailsModel: if thumbnail_id in project_cache: return project_cache[thumbnail_id] - filepath = self._thumbnail_cache.get_thumbnail_filepath( - project_name, thumbnail_id - ) - if filepath is not None: - project_cache[thumbnail_id] = filepath - return filepath - - # 'ayon_api' had a bug, public function - # 'get_thumbnail_by_id' did not return output of - # 'ServerAPI' method. - con = ayon_api.get_server_api_connection() - result = con.get_thumbnail_by_id(project_name, thumbnail_id) - if result is None: - pass - - elif result.is_valid: - filepath = self._thumbnail_cache.store_thumbnail( - project_name, - thumbnail_id, - result.content, - result.content_type - ) + filepath = get_thumbnail_path(project_name, thumbnail_id) project_cache[thumbnail_id] = filepath return filepath diff --git a/client/ayon_core/tools/launcher/ui/actions_widget.py b/client/ayon_core/tools/launcher/ui/actions_widget.py index a225827418..2ffce13292 100644 --- a/client/ayon_core/tools/launcher/ui/actions_widget.py +++ b/client/ayon_core/tools/launcher/ui/actions_widget.py @@ -290,6 +290,34 @@ class ActionDelegate(QtWidgets.QStyledItemDelegate): painter.drawPixmap(extender_x, extender_y, pix) +class ActionsProxyModel(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) + + def lessThan(self, left, right): + # Sort by action order and then by label + left_value = left.data(ACTION_SORT_ROLE) + right_value = right.data(ACTION_SORT_ROLE) + + # Values are same -> use super sorting + if left_value == right_value: + # Default behavior is using DisplayRole + return super().lessThan(left, right) + + # Validate 'None' values + if right_value is None: + return True + if left_value is None: + return False + # Sort values and handle incompatible types + try: + return left_value < right_value + except TypeError: + return True + + class ActionsWidget(QtWidgets.QWidget): def __init__(self, controller, parent): super(ActionsWidget, self).__init__(parent) @@ -316,10 +344,7 @@ class ActionsWidget(QtWidgets.QWidget): model = ActionsQtModel(controller) - proxy_model = QtCore.QSortFilterProxyModel() - proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive) - proxy_model.setSortRole(ACTION_SORT_ROLE) - + proxy_model = ActionsProxyModel() proxy_model.setSourceModel(model) view.setModel(proxy_model) @@ -359,7 +384,8 @@ class ActionsWidget(QtWidgets.QWidget): def _on_model_refresh(self): self._proxy_model.sort(0) # Force repaint all items - self._view.update() + viewport = self._view.viewport() + viewport.update() def _on_animation(self): 
time_now = time.time()
diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py
index ad2993af50..cfe91cadab 100644
--- a/client/ayon_core/tools/loader/models/actions.py
+++ b/client/ayon_core/tools/loader/models/actions.py
@@ -6,6 +6,7 @@ import uuid
 
 import ayon_api
 
+from ayon_core.lib import NestedCacheItem
 from ayon_core.pipeline.load import (
     discover_loader_plugins,
     ProductLoaderPlugin,
@@ -17,7 +18,6 @@ from ayon_core.pipeline.load import (
     LoadError,
     IncompatibleLoaderError,
 )
-from ayon_core.tools.common_models import NestedCacheItem
 
 from ayon_core.tools.loader.abstract import ActionItem
 
 ACTIONS_MODEL_SENDER = "actions.model"
diff --git a/client/ayon_core/tools/loader/models/products.py b/client/ayon_core/tools/loader/models/products.py
index 812446a012..a3bbc30a09 100644
--- a/client/ayon_core/tools/loader/models/products.py
+++ b/client/ayon_core/tools/loader/models/products.py
@@ -5,8 +5,8 @@ import arrow
 import ayon_api
 from ayon_api.operations import OperationsSession
 
+from ayon_core.lib import NestedCacheItem
 from ayon_core.style import get_default_entity_icon_color
-from ayon_core.tools.common_models import NestedCacheItem
 from ayon_core.tools.loader.abstract import (
     ProductTypeItem,
     ProductItem,
diff --git a/client/ayon_core/tools/loader/models/sitesync.py b/client/ayon_core/tools/loader/models/sitesync.py
index 987510905b..02504c2ad3 100644
--- a/client/ayon_core/tools/loader/models/sitesync.py
+++ b/client/ayon_core/tools/loader/models/sitesync.py
@@ -2,9 +2,8 @@ import collections
 
 from ayon_api import get_representations, get_versions_links
 
-from ayon_core.lib import Logger
+from ayon_core.lib import Logger, NestedCacheItem
 from ayon_core.addon import AddonsManager
-from ayon_core.tools.common_models import NestedCacheItem
 from ayon_core.tools.loader.abstract import ActionItem
 
 DOWNLOAD_IDENTIFIER = "sitesync.download"
diff --git a/client/ayon_core/tools/loader/ui/window.py b/client/ayon_core/tools/loader/ui/window.py
index 3a6f4679fa..8529a53b06 100644
--- a/client/ayon_core/tools/loader/ui/window.py
+++ b/client/ayon_core/tools/loader/ui/window.py
@@ -335,9 +335,7 @@ class LoaderWindow(QtWidgets.QWidget):
 
     def closeEvent(self, event):
         super(LoaderWindow, self).closeEvent(event)
-        # Deselect project so current context will be selected
-        # on next 'showEvent'
-        self._controller.set_selected_project(None)
+        self._reset_on_show = True
 
     def keyPressEvent(self, event):
diff --git a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
index 47c5399cf7..4e34f9b58c 100644
--- a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
+++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
@@ -52,6 +52,7 @@ class SelectionTypes:
 class BaseGroupWidget(QtWidgets.QWidget):
     selected = QtCore.Signal(str, str, str)
     removed_selected = QtCore.Signal()
+    double_clicked = QtCore.Signal()
 
     def __init__(self, group_name, parent):
         super(BaseGroupWidget, self).__init__(parent)
@@ -192,6 +193,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget):
             else:
                 widget = ConvertorItemCardWidget(item, self)
                 widget.selected.connect(self._on_widget_selection)
+                widget.double_clicked.connect(self.double_clicked)
                 self._widgets_by_id[item.id] = widget
                 self._content_layout.insertWidget(widget_idx, widget)
                 widget_idx += 1
@@ -254,6 +256,7 @@ class InstanceGroupWidget(BaseGroupWidget):
                 )
                 widget.selected.connect(self._on_widget_selection)
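The double_clicked wiring here relies on connecting a child's signal directly to the parent's identically named signal so the event bubbles up the widget hierarchy. A minimal standalone sketch of the relay pattern, with hypothetical class names:

from qtpy import QtCore, QtWidgets


class CardChild(QtWidgets.QWidget):
    double_clicked = QtCore.Signal()

    def mouseDoubleClickEvent(self, event):
        super().mouseDoubleClickEvent(event)
        self.double_clicked.emit()


class CardGroup(QtWidgets.QWidget):
    double_clicked = QtCore.Signal()

    def __init__(self, parent=None):
        super().__init__(parent)
        child = CardChild(self)
        # Signal-to-signal connection: the child's double click is
        # re-emitted by the group, so views only connect to the group.
        child.double_clicked.connect(self.double_clicked)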
widget.active_changed.connect(self._on_active_changed) + widget.double_clicked.connect(self.double_clicked) self._widgets_by_id[instance.id] = widget self._content_layout.insertWidget(widget_idx, widget) widget_idx += 1 @@ -271,6 +274,7 @@ class CardWidget(BaseClickableFrame): # Group identifier of card # - this must be set because if send when mouse is released with card id _group_identifier = None + double_clicked = QtCore.Signal() def __init__(self, parent): super(CardWidget, self).__init__(parent) @@ -279,6 +283,11 @@ class CardWidget(BaseClickableFrame): self._selected = False self._id = None + def mouseDoubleClickEvent(self, event): + super(CardWidget, self).mouseDoubleClickEvent(event) + if self._is_valid_double_click(event): + self.double_clicked.emit() + @property def id(self): """Id of card.""" @@ -312,6 +321,9 @@ class CardWidget(BaseClickableFrame): self.selected.emit(self._id, self._group_identifier, selection_type) + def _is_valid_double_click(self, event): + return True + class ContextCardWidget(CardWidget): """Card for global context. @@ -527,6 +539,15 @@ class InstanceCardWidget(CardWidget): def _on_expend_clicked(self): self._set_expanded() + def _is_valid_double_click(self, event): + widget = self.childAt(event.pos()) + if ( + widget is self._active_checkbox + or widget is self._expand_btn + ): + return False + return True + class InstanceCardView(AbstractInstanceView): """Publish access to card view. @@ -534,6 +555,8 @@ class InstanceCardView(AbstractInstanceView): Wrapper of all widgets in card view. """ + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceCardView, self).__init__(parent) @@ -715,6 +738,7 @@ class InstanceCardView(AbstractInstanceView): ) group_widget.active_changed.connect(self._on_active_changed) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(widget_idx, group_widget) self._widgets_by_group[group_name] = group_widget @@ -755,6 +779,7 @@ class InstanceCardView(AbstractInstanceView): widget = ContextCardWidget(self._content_widget) widget.selected.connect(self._on_widget_selection) + widget.double_clicked.connect(self.double_clicked) self._context_widget = widget @@ -778,6 +803,7 @@ class InstanceCardView(AbstractInstanceView): CONVERTOR_ITEM_GROUP, self._content_widget ) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(1, group_widget) self._convertor_items_group = group_widget diff --git a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py index 3322a73be6..71be0ab1a4 100644 --- a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py @@ -110,6 +110,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): This is required to be able use custom checkbox on custom place. 
""" active_changed = QtCore.Signal(str, bool) + double_clicked = QtCore.Signal() def __init__(self, instance, parent): super(InstanceListItemWidget, self).__init__(parent) @@ -149,6 +150,12 @@ class InstanceListItemWidget(QtWidgets.QWidget): self._set_valid_property(instance.has_valid_context) + def mouseDoubleClickEvent(self, event): + widget = self.childAt(event.pos()) + super(InstanceListItemWidget, self).mouseDoubleClickEvent(event) + if widget is not self._active_checkbox: + self.double_clicked.emit() + def _set_valid_property(self, valid): if self._has_valid_context == valid: return @@ -209,6 +216,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): class ListContextWidget(QtWidgets.QFrame): """Context (or global attributes) widget.""" + double_clicked = QtCore.Signal() + def __init__(self, parent): super(ListContextWidget, self).__init__(parent) @@ -225,6 +234,10 @@ class ListContextWidget(QtWidgets.QFrame): self.label_widget = label_widget + def mouseDoubleClickEvent(self, event): + super(ListContextWidget, self).mouseDoubleClickEvent(event) + self.double_clicked.emit() + class InstanceListGroupWidget(QtWidgets.QFrame): """Widget representing group of instances. @@ -317,6 +330,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame): class InstanceTreeView(QtWidgets.QTreeView): """View showing instances and their groups.""" toggle_requested = QtCore.Signal(int) + double_clicked = QtCore.Signal() def __init__(self, *args, **kwargs): super(InstanceTreeView, self).__init__(*args, **kwargs) @@ -425,6 +439,9 @@ class InstanceListView(AbstractInstanceView): This is public access to and from list view. """ + + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceListView, self).__init__(parent) @@ -454,6 +471,7 @@ class InstanceListView(AbstractInstanceView): instance_view.collapsed.connect(self._on_collapse) instance_view.expanded.connect(self._on_expand) instance_view.toggle_requested.connect(self._on_toggle_request) + instance_view.double_clicked.connect(self.double_clicked) self._group_items = {} self._group_widgets = {} @@ -687,6 +705,7 @@ class InstanceListView(AbstractInstanceView): self._active_toggle_enabled ) widget.active_changed.connect(self._on_active_changed) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._widgets_by_id[instance.id] = widget @@ -717,6 +736,7 @@ class InstanceListView(AbstractInstanceView): ) proxy_index = self._proxy_model.mapFromSource(index) widget = ListContextWidget(self._instance_view) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._context_widget = widget diff --git a/client/ayon_core/tools/publisher/widgets/overview_widget.py b/client/ayon_core/tools/publisher/widgets/overview_widget.py index dd82185830..cedf52ae01 100644 --- a/client/ayon_core/tools/publisher/widgets/overview_widget.py +++ b/client/ayon_core/tools/publisher/widgets/overview_widget.py @@ -18,6 +18,7 @@ class OverviewWidget(QtWidgets.QFrame): instance_context_changed = QtCore.Signal() create_requested = QtCore.Signal() convert_requested = QtCore.Signal() + publish_tab_requested = QtCore.Signal() anim_end_value = 200 anim_duration = 200 @@ -113,9 +114,15 @@ class OverviewWidget(QtWidgets.QFrame): product_list_view.selection_changed.connect( self._on_product_change ) + product_list_view.double_clicked.connect( + self.publish_tab_requested + ) product_view_cards.selection_changed.connect( self._on_product_change ) + 
product_view_cards.double_clicked.connect( + self.publish_tab_requested + ) # Active instances changed product_list_view.active_changed.connect( self._on_active_changed diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py index 123864ff6c..1b13ced317 100644 --- a/client/ayon_core/tools/publisher/window.py +++ b/client/ayon_core/tools/publisher/window.py @@ -258,6 +258,9 @@ class PublisherWindow(QtWidgets.QDialog): overview_widget.convert_requested.connect( self._on_convert_requested ) + overview_widget.publish_tab_requested.connect( + self._go_to_publish_tab + ) save_btn.clicked.connect(self._on_save_clicked) reset_btn.clicked.connect(self._on_reset_clicked) diff --git a/client/ayon_core/tools/push_to_project/models/integrate.py b/client/ayon_core/tools/push_to_project/models/integrate.py index 6e43050c05..5937ffa4da 100644 --- a/client/ayon_core/tools/push_to_project/models/integrate.py +++ b/client/ayon_core/tools/push_to_project/models/integrate.py @@ -723,7 +723,6 @@ class ProjectPushItemProcess: dst_project_name = self._item.dst_project_name dst_folder_id = self._item.dst_folder_id dst_task_name = self._item.dst_task_name - dst_task_name_low = dst_task_name.lower() new_folder_name = self._item.new_folder_name if not dst_folder_id and not new_folder_name: self._status.set_failed( @@ -765,7 +764,7 @@ class ProjectPushItemProcess: dst_project_name, folder_ids=[folder_entity["id"]] ) } - task_info = folder_tasks.get(dst_task_name_low) + task_info = folder_tasks.get(dst_task_name.lower()) if not task_info: self._status.set_failed( f"Could find task with name \"{dst_task_name}\"" diff --git a/client/ayon_core/version.py b/client/ayon_core/version.py index a60de0493a..275e1b1dd6 100644 --- a/client/ayon_core/version.py +++ b/client/ayon_core/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring AYON core addon version.""" -__version__ = "0.3.1-dev.1" +__version__ = "0.3.2-dev.1" diff --git a/client/pyproject.toml b/client/pyproject.toml index 1a0ad7e5f2..5e811321f8 100644 --- a/client/pyproject.toml +++ b/client/pyproject.toml @@ -16,7 +16,7 @@ aiohttp_json_rpc = "*" # TVPaint server aiohttp-middlewares = "^2.0.0" wsrpc_aiohttp = "^3.1.1" # websocket server Click = "^8" -OpenTimelineIO = "0.14.1" +OpenTimelineIO = "0.16.0" opencolorio = "2.2.1" Pillow = "9.5.0" pynput = "^1.7.2" # Timers manager - TODO remove diff --git a/package.py b/package.py index 79450d029f..b7b8d2dae6 100644 --- a/package.py +++ b/package.py @@ -1,11 +1,12 @@ name = "core" title = "Core" -version = "0.3.1-dev.1" +version = "0.3.2-dev.1" client_dir = "ayon_core" plugin_for = ["ayon_server"] -requires = [ - "~ayon_server-1.0.3+<2.0.0", -] +ayon_server_version = ">=1.0.3,<2.0.0" +ayon_launcher_version = ">=1.0.2" +ayon_required_addons = {} +ayon_compatible_addons = {} diff --git a/pyproject.toml b/pyproject.toml index c1f6ddfb0b..4726bef41a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -106,7 +106,7 @@ line-ending = "auto" [tool.codespell] # Ignore words that are not in the dictionary. 
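The settings override conversion added to `server/__init__.py` below migrates the old `ocio_config.filepath` override into the new profile list, keeping only the first path. A worked example of the transformation, with illustrative values:

# Saved by core 0.3.0 and older:
old_overrides = {
    "imageio": {
        "ocio_config": {
            "filepath": ["{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio"]
        }
    }
}

# After the conversion, the first path survives as a profile:
new_overrides = {
    "imageio": {
        "ocio_config_profiles": [
            {
                "type": "builtin_path",
                "product_name": "",
                "host_names": [],
                "task_names": [],
                "task_types": [],
                "custom_path": "",
                "builtin_path": "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
            }
        ]
    }
}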
-ignore-words-list = "ayon,ynput,parms,parm,hda,developpement"
+ignore-words-list = "ayon,ynput,parms,parm,hda,developpement,ue"
 skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*"
 count = true
diff --git a/server/__init__.py b/server/__init__.py
index 152cc77218..79f505ccd5 100644
--- a/server/__init__.py
+++ b/server/__init__.py
@@ -1,3 +1,5 @@
+from typing import Any
+
 from ayon_server.addons import BaseServerAddon
 
 from .settings import CoreSettings, DEFAULT_VALUES
@@ -9,3 +11,53 @@ class CoreAddon(BaseServerAddon):
     async def get_default_settings(self):
         settings_model_cls = self.get_settings_model()
         return settings_model_cls(**DEFAULT_VALUES)
+
+    async def convert_settings_overrides(
+        self,
+        source_version: str,
+        overrides: dict[str, Any],
+    ) -> dict[str, Any]:
+        self._convert_imageio_configs_0_3_1(overrides)
+        # Use super conversion
+        return await super().convert_settings_overrides(
+            source_version, overrides
+        )
+
+    def _convert_imageio_configs_0_3_1(self, overrides):
+        """Imageio config settings changed to profiles in 0.3.1."""
+        imageio_overrides = overrides.get("imageio") or {}
+        if (
+            "ocio_config" not in imageio_overrides
+            or "filepath" not in imageio_overrides["ocio_config"]
+        ):
+            return
+
+        ocio_config = imageio_overrides.pop("ocio_config")
+
+        filepath = ocio_config["filepath"]
+        if not filepath:
+            return
+        first_filepath = filepath[0]
+        ocio_config_profiles = imageio_overrides.setdefault(
+            "ocio_config_profiles", []
+        )
+        base_value = {
+            "type": "builtin_path",
+            "product_name": "",
+            "host_names": [],
+            "task_names": [],
+            "task_types": [],
+            "custom_path": "",
+            "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio"
+        }
+        if first_filepath in (
+            "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+            "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio",
+        ):
+            base_value["type"] = "builtin_path"
+            base_value["builtin_path"] = first_filepath
+        else:
+            base_value["type"] = "custom_path"
+            base_value["custom_path"] = first_filepath
+
+        ocio_config_profiles.append(base_value)
diff --git a/server/settings/main.py b/server/settings/main.py
index 28a69e182d..40e16e7e91 100644
--- a/server/settings/main.py
+++ b/server/settings/main.py
@@ -54,9 +54,67 @@ class CoreImageIOFileRulesModel(BaseSettingsModel):
         return value
 
 
-class CoreImageIOConfigModel(BaseSettingsModel):
-    filepath: list[str] = SettingsField(
-        default_factory=list, title="Config path"
+def _ocio_config_profile_types():
+    return [
+        {"value": "builtin_path", "label": "AYON built-in OCIO config"},
+        {"value": "custom_path", "label": "Path to OCIO config"},
+        {"value": "product_name", "label": "Published product"},
+    ]
+
+
+def _ocio_built_in_paths():
+    return [
+        {
+            "value": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio",
+            "label": "ACES 1.2",
+            "description": "ACES 1.2 OCIO config file."
+ }, + { + "value": "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio", + "label": "Nuke default", + }, + ] + + +class CoreImageIOConfigProfilesModel(BaseSettingsModel): + _layout = "expanded" + host_names: list[str] = SettingsField( + default_factory=list, + title="Host names" + ) + task_types: list[str] = SettingsField( + default_factory=list, + title="Task types", + enum_resolver=task_types_enum + ) + task_names: list[str] = SettingsField( + default_factory=list, + title="Task names" + ) + type: str = SettingsField( + title="Profile type", + enum_resolver=_ocio_config_profile_types, + conditionalEnum=True, + default="builtin_path", + section="---", + ) + builtin_path: str = SettingsField( + "ACES 1.2", + title="Built-in OCIO config", + enum_resolver=_ocio_built_in_paths, + ) + custom_path: str = SettingsField( + "", + title="OCIO config path", + description="Path to OCIO config. Anatomy formatting is supported.", + ) + product_name: str = SettingsField( + "", + title="Product name", + description=( + "Published product name to get OCIO config from. " + "Partial match is supported." + ), ) @@ -65,9 +123,8 @@ class CoreImageIOBaseModel(BaseSettingsModel): False, title="Enable Color Management" ) - ocio_config: CoreImageIOConfigModel = SettingsField( - default_factory=CoreImageIOConfigModel, - title="OCIO config" + ocio_config_profiles: list[CoreImageIOConfigProfilesModel] = SettingsField( + default_factory=list, title="OCIO config profiles" ) file_rules: CoreImageIOFileRulesModel = SettingsField( default_factory=CoreImageIOFileRulesModel, @@ -186,12 +243,17 @@ class CoreSettings(BaseSettingsModel): DEFAULT_VALUES = { "imageio": { "activate_global_color_management": False, - "ocio_config": { - "filepath": [ - "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", - "{BUILTIN_OCIO_ROOT}/nuke-default/config.ocio" - ] - }, + "ocio_config_profiles": [ + { + "host_names": [], + "task_types": [], + "task_names": [], + "type": "builtin_path", + "builtin_path": "{BUILTIN_OCIO_ROOT}/aces_1.2/config.ocio", + "custom_path": "", + "product_name": "", + } + ], "file_rules": { "activate_global_file_rules": False, "rules": [ @@ -199,42 +261,57 @@ DEFAULT_VALUES = { "name": "example", "pattern": ".*(beauty).*", "colorspace": "ACES - ACEScg", - "ext": "exr" + "ext": "exr", } - ] - } + ], + }, }, "studio_name": "", "studio_code": "", - "environments": "{\n\"STUDIO_SW\": {\n \"darwin\": \"/mnt/REPO_SW\",\n \"linux\": \"/mnt/REPO_SW\",\n \"windows\": \"P:/REPO_SW\"\n }\n}", + "environments": json.dumps( + { + "STUDIO_SW": { + "darwin": "/mnt/REPO_SW", + "linux": "/mnt/REPO_SW", + "windows": "P:/REPO_SW" + } + }, + indent=4 + ), "tools": DEFAULT_TOOLS_VALUES, "version_start_category": { "profiles": [] }, "publish": DEFAULT_PUBLISH_VALUES, - "project_folder_structure": json.dumps({ - "__project_root__": { - "prod": {}, - "resources": { - "footage": { - "plates": {}, - "offline": {} + "project_folder_structure": json.dumps( + { + "__project_root__": { + "prod": {}, + "resources": { + "footage": { + "plates": {}, + "offline": {} + }, + "audio": {}, + "art_dept": {} }, - "audio": {}, - "art_dept": {} - }, - "editorial": {}, - "assets": { - "characters": {}, - "locations": {} - }, - "shots": {} - } - }, indent=4), + "editorial": {}, + "assets": { + "characters": {}, + "locations": {} + }, + "shots": {} + } + }, + indent=4 + ), "project_plugins": { "windows": [], "darwin": [], "linux": [] }, - "project_environments": "{}" + "project_environments": json.dumps( + {}, + indent=4 + ) } diff --git a/server/settings/publish_plugins.py 
b/server/settings/publish_plugins.py
index e61bf6986b..61e73ce912 100644
--- a/server/settings/publish_plugins.py
+++ b/server/settings/publish_plugins.py
@@ -59,6 +59,33 @@ class CollectFramesFixDefModel(BaseSettingsModel):
     )
 
 
+class ValidateOutdatedContainersProfile(BaseSettingsModel):
+    _layout = "expanded"
+    # Filtering
+    host_names: list[str] = SettingsField(
+        default_factory=list,
+        title="Host names"
+    )
+    # Profile values
+    enabled: bool = SettingsField(True, title="Enabled")
+    optional: bool = SettingsField(True, title="Optional")
+    active: bool = SettingsField(True, title="Active")
+
+
+class ValidateOutdatedContainersModel(BaseSettingsModel):
+    """Validate that loaded containers are up to date.
+
+    It is possible to disable the validation for specific publishing
+    contexts with profiles.
+    """
+
+    _isGroup = True
+    plugin_state_profiles: list[ValidateOutdatedContainersProfile] = SettingsField(
+        default_factory=list,
+        title="Plugin enable state profiles",
+    )
+
+
 class ValidateIntentProfile(BaseSettingsModel):
     _layout = "expanded"
     hosts: list[str] = SettingsField(default_factory=list, title="Host names")
@@ -770,6 +797,10 @@ class PublishPuginsModel(BaseSettingsModel):
         default_factory=ValidateBaseModel,
         title="Validate Version"
     )
+    ValidateOutdatedContainers: ValidateOutdatedContainersModel = SettingsField(
+        default_factory=ValidateOutdatedContainersModel,
+        title="Validate Containers"
+    )
     ValidateIntent: ValidateIntentModel = SettingsField(
         default_factory=ValidateIntentModel,
         title="Validate Intent"
@@ -855,6 +886,25 @@ DEFAULT_PUBLISH_VALUES = {
         "optional": False,
         "active": True
     },
+    "ValidateOutdatedContainers": {
+        "plugin_state_profiles": [
+            {
+                # Default host names are based on original
+                # filter of ValidateContainer pyblish plugin
+                "host_names": [
+                    "maya",
+                    "houdini",
+                    "nuke",
+                    "harmony",
+                    "photoshop",
+                    "aftereffects"
+                ],
+                "enabled": True,
+                "optional": True,
+                "active": True
+            }
+        ]
+    },
     "ValidateIntent": {
         "enabled": False,
         "profiles": []
diff --git a/server_addon/aftereffects/package.py b/server_addon/aftereffects/package.py
index a680b37602..7a2f9bc7af 100644
--- a/server_addon/aftereffects/package.py
+++ b/server_addon/aftereffects/package.py
@@ -1,3 +1,3 @@
 name = "aftereffects"
 title = "AfterEffects"
-version = "0.1.3"
+version = "0.1.4"
diff --git a/server_addon/aftereffects/server/settings/publish_plugins.py b/server_addon/aftereffects/server/settings/publish_plugins.py
index 61d67f26d3..a9f30c6686 100644
--- a/server_addon/aftereffects/server/settings/publish_plugins.py
+++ b/server_addon/aftereffects/server/settings/publish_plugins.py
@@ -22,12 +22,6 @@ class ValidateSceneSettingsModel(BaseSettingsModel):
     )
 
 
-class ValidateContainersModel(BaseSettingsModel):
-    enabled: bool = SettingsField(True, title="Enabled")
-    optional: bool = SettingsField(True, title="Optional")
-    active: bool = SettingsField(True, title="Active")
-
-
 class AfterEffectsPublishPlugins(BaseSettingsModel):
     CollectReview: CollectReviewPluginModel = SettingsField(
         default_factory=CollectReviewPluginModel,
@@ -37,10 +31,6 @@ class AfterEffectsPublishPlugins(BaseSettingsModel):
         default_factory=ValidateSceneSettingsModel,
         title="Validate Scene Settings",
     )
-    ValidateContainers: ValidateContainersModel = SettingsField(
-        default_factory=ValidateContainersModel,
-        title="Validate Containers",
-    )
 
 
 AE_PUBLISH_PLUGINS_DEFAULTS = {
@@ -58,9 +48,4 @@ AE_PUBLISH_PLUGINS_DEFAULTS = {
             ".*"
         ]
     },
-    "ValidateContainers": {
-        "enabled": True,
-        "optional": True,
-        "active": True,
-    }
 }
diff --git 
a/server_addon/applications/client/ayon_applications/addon.py b/server_addon/applications/client/ayon_applications/addon.py index 0f1b68af0e..a8eaa46cad 100644 --- a/server_addon/applications/client/ayon_applications/addon.py +++ b/server_addon/applications/client/ayon_applications/addon.py @@ -110,6 +110,26 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): ] } + def launch_application( + self, app_name, project_name, folder_path, task_name + ): + """Launch application. + + Args: + app_name (str): Full application name e.g. 'maya/2024'. + project_name (str): Project name. + folder_path (str): Folder path. + task_name (str): Task name. + + """ + app_manager = self.get_applications_manager() + return app_manager.launch( + app_name, + project_name=project_name, + folder_path=folder_path, + task_name=task_name, + ) + # --- CLI --- def cli(self, addon_click_group): main_group = click_wrap.group( @@ -134,6 +154,17 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): default=None ) ) + ( + main_group.command( + self._cli_launch_applications, + name="launch", + help="Launch application" + ) + .option("--app", required=True, help="Application name") + .option("--project", required=True, help="Project name") + .option("--folder", required=True, help="Folder path") + .option("--task", required=True, help="Task name") + ) # Convert main command to click object and add it to parent group addon_click_group.add_command( main_group.to_click_obj() @@ -171,3 +202,15 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): with open(output_json_path, "w") as file_stream: json.dump(env, file_stream, indent=4) + + def _cli_launch_applications(self, project, folder, task, app): + """Launch application. + + Args: + project (str): Project name. + folder (str): Folder path. + task (str): Task name. + app (str): Full application name e.g. 'maya/2024'. 
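Assuming the addon is resolved through the addons manager used elsewhere in ayon_core, calling the new `launch_application` wrapper could look like this sketch; the lookup accessor and all argument values are illustrative, not mandated by this patch:

from ayon_core.addon import AddonsManager

manager = AddonsManager()
# Addon lookup accessor is assumed here; adjust to the API available.
applications_addon = manager.get("applications")
applications_addon.launch_application(
    "maya/2024",       # app_name, a full application variant name
    "my_project",      # project_name (illustrative)
    "/assets/hero",    # folder_path (illustrative)
    "modeling",        # task_name (illustrative)
)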
+ + """ + self.launch_application(app, project, folder, task) diff --git a/server_addon/applications/client/ayon_applications/utils.py b/server_addon/applications/client/ayon_applications/utils.py index 234fa6c683..185779a949 100644 --- a/server_addon/applications/client/ayon_applications/utils.py +++ b/server_addon/applications/client/ayon_applications/utils.py @@ -281,13 +281,20 @@ def prepare_app_environments( app.environment ] + task_entity = data.get("task_entity") folder_entity = data.get("folder_entity") # Add tools environments groups_by_name = {} tool_by_group_name = collections.defaultdict(dict) - if folder_entity: - # Make sure each tool group can be added only once - for key in folder_entity["attrib"].get("tools") or []: + tools = None + if task_entity: + tools = task_entity["attrib"].get("tools") + + if tools is None and folder_entity: + tools = folder_entity["attrib"].get("tools") + + if tools: + for key in tools: tool = app.manager.tools.get(key) if not tool or not tool.is_valid_for_app(app): continue diff --git a/server_addon/applications/package.py b/server_addon/applications/package.py index ce312ed662..983749355e 100644 --- a/server_addon/applications/package.py +++ b/server_addon/applications/package.py @@ -1,3 +1,10 @@ name = "applications" title = "Applications" -version = "0.2.0" +version = "0.2.2" + +ayon_server_version = ">=1.0.7" +ayon_launcher_version = ">=1.0.2" +ayon_required_addons = { + "core": ">0.3.0", +} +ayon_compatible_addons = {} diff --git a/server_addon/applications/server/applications.json b/server_addon/applications/server/applications.json index e4b72fdff9..84b7fa33cf 100644 --- a/server_addon/applications/server/applications.json +++ b/server_addon/applications/server/applications.json @@ -1271,6 +1271,28 @@ } ] }, + "equalizer": { + "enabled": true, + "label": "3DEqualizer", + "icon": "{}/app_icons/3de4.png", + "host_name": "equalizer", + "environment": "{}", + "variants": [ + { + "name": "7-1v2", + "label": "7.1v2", + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\3DE4_win64_r7.1v2\\bin\\3DE4.exe" + ], + "darwin": [], + "linux": [] + }, + "environment": "{}" + } + ] + }, "additional_apps": [] } } diff --git a/server_addon/applications/server/settings.py b/server_addon/applications/server/settings.py index 9e915b6152..ec6b4ba1d7 100644 --- a/server_addon/applications/server/settings.py +++ b/server_addon/applications/server/settings.py @@ -198,6 +198,8 @@ class ApplicationsSettings(BaseSettingsModel): default_factory=AppGroupWithPython, title="OpenRV") zbrush: AppGroup = SettingsField( default_factory=AppGroupWithPython, title="Zbrush") + equalizer: AppGroup = SettingsField( + default_factory=AppGroupWithPython, title="3DEqualizer") additional_apps: list[AdditionalAppGroup] = SettingsField( default_factory=list, title="Additional Applications") diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py index f0a36d4740..749077d2a8 100644 --- a/server_addon/create_ayon_addons.py +++ b/server_addon/create_ayon_addons.py @@ -47,7 +47,7 @@ plugin_for = ["ayon_server"] """ CLIENT_VERSION_CONTENT = '''# -*- coding: utf-8 -*- -"""Package declaring AYON core addon version.""" +"""Package declaring AYON addon '{}' version.""" __version__ = "{}" ''' @@ -183,6 +183,7 @@ def create_addon_zip( def prepare_client_code( + addon_name: str, addon_dir: Path, addon_output_dir: Path, addon_version: str @@ -211,7 +212,9 @@ def prepare_client_code( version_path = subpath / "version.py" if version_path.exists(): 
with open(version_path, "w") as stream: - stream.write(CLIENT_VERSION_CONTENT.format(addon_version)) + stream.write( + CLIENT_VERSION_CONTENT.format(addon_name, addon_version) + ) zip_filepath = private_dir / "client.zip" with ZipFileLongPaths(zip_filepath, "w", zipfile.ZIP_DEFLATED) as zipf: @@ -262,7 +265,9 @@ def create_addon_package( server_dir, addon_output_dir / "server", dirs_exist_ok=True ) - prepare_client_code(addon_dir, addon_output_dir, addon_version) + prepare_client_code( + package.name, addon_dir, addon_output_dir, addon_version + ) if create_zip: create_addon_zip( diff --git a/server_addon/deadline/package.py b/server_addon/deadline/package.py index 944797fea6..e26734c813 100644 --- a/server_addon/deadline/package.py +++ b/server_addon/deadline/package.py @@ -1,3 +1,3 @@ name = "deadline" title = "Deadline" -version = "0.1.10" +version = "0.1.12" diff --git a/server_addon/deadline/server/__init__.py b/server_addon/deadline/server/__init__.py index e7dcb7d347..8d2dc152cd 100644 --- a/server_addon/deadline/server/__init__.py +++ b/server_addon/deadline/server/__init__.py @@ -2,11 +2,13 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .settings import DeadlineSettings, DEFAULT_VALUES +from .settings import DeadlineSettings, DEFAULT_VALUES, DeadlineSiteSettings class Deadline(BaseServerAddon): settings_model: Type[DeadlineSettings] = DeadlineSettings + site_settings_model: Type[DeadlineSiteSettings] = DeadlineSiteSettings + async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/deadline/server/settings/__init__.py b/server_addon/deadline/server/settings/__init__.py index 0307862afa..d25c0fb330 100644 --- a/server_addon/deadline/server/settings/__init__.py +++ b/server_addon/deadline/server/settings/__init__.py @@ -2,9 +2,11 @@ from .main import ( DeadlineSettings, DEFAULT_VALUES, ) +from .site_settings import DeadlineSiteSettings __all__ = ( "DeadlineSettings", + "DeadlineSiteSettings", "DEFAULT_VALUES", ) diff --git a/server_addon/deadline/server/settings/main.py b/server_addon/deadline/server/settings/main.py index 21a314cd2f..47ad72a86f 100644 --- a/server_addon/deadline/server/settings/main.py +++ b/server_addon/deadline/server/settings/main.py @@ -15,12 +15,6 @@ from .publish_plugins import ( ) -class ServerListSubmodel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Name") - value: str = SettingsField(title="Value") - - async def defined_deadline_ws_name_enum_resolver( addon: "BaseServerAddon", settings_variant: str = "production", @@ -32,25 +26,39 @@ async def defined_deadline_ws_name_enum_resolver( settings = await addon.get_studio_settings(variant=settings_variant) - ws_urls = [] + ws_server_name = [] for deadline_url_item in settings.deadline_urls: - ws_urls.append(deadline_url_item.name) + ws_server_name.append(deadline_url_item.name) - return ws_urls + return ws_server_name + +class ServerItemSubmodel(BaseSettingsModel): + """Connection info about configured DL servers.""" + _layout = "compact" + name: str = SettingsField(title="Name") + value: str = SettingsField(title="Url") + require_authentication: bool = SettingsField( + False, title="Require authentication") + not_verify_ssl: bool = SettingsField( + False, title="Don't verify SSL") class DeadlineSettings(BaseSettingsModel): - deadline_urls: list[ServerListSubmodel] = SettingsField( + # configured DL servers + deadline_urls: list[ServerItemSubmodel] = SettingsField( default_factory=list, - 
title="System Deadline Webservice URLs", + title="System Deadline Webservice Info", scope=["studio"], ) + + # name(key) of selected server for project deadline_server: str = SettingsField( - title="Project deadline server", + title="Project Deadline server name", section="---", scope=["project"], enum_resolver=defined_deadline_ws_name_enum_resolver ) + publish: PublishPluginsModel = SettingsField( default_factory=PublishPluginsModel, title="Publish Plugins", @@ -62,11 +70,14 @@ class DeadlineSettings(BaseSettingsModel): return value + DEFAULT_VALUES = { "deadline_urls": [ { "name": "default", - "value": "http://127.0.0.1:8082" + "value": "http://127.0.0.1:8082", + "require_authentication": False, + "not_verify_ssl": False } ], "deadline_server": "default", diff --git a/server_addon/deadline/server/settings/publish_plugins.py b/server_addon/deadline/server/settings/publish_plugins.py index 9f69143e37..784ad2560b 100644 --- a/server_addon/deadline/server/settings/publish_plugins.py +++ b/server_addon/deadline/server/settings/publish_plugins.py @@ -191,7 +191,6 @@ class NukeSubmitDeadlineModel(BaseSettingsModel): @validator( "limit_groups", - "env_allowed_keys", "env_search_replace_values") def validate_unique_names(cls, value): ensure_unique_names(value) diff --git a/server_addon/deadline/server/settings/site_settings.py b/server_addon/deadline/server/settings/site_settings.py new file mode 100644 index 0000000000..a77a6edc7e --- /dev/null +++ b/server_addon/deadline/server/settings/site_settings.py @@ -0,0 +1,26 @@ +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, +) +from .main import defined_deadline_ws_name_enum_resolver + + +class CredentialPerServerModel(BaseSettingsModel): + """Provide credentials for configured DL servers""" + _layout = "expanded" + server_name: str = SettingsField("", + title="DL server name", + enum_resolver=defined_deadline_ws_name_enum_resolver) + username: str = SettingsField("", + title="Username") + password: str = SettingsField("", + title="Password") + + +class DeadlineSiteSettings(BaseSettingsModel): + local_settings: list[CredentialPerServerModel] = SettingsField( + default_factory=list, + title="Local setting", + description="Please provide credentials for configured Deadline servers", + ) + diff --git a/server_addon/harmony/package.py b/server_addon/harmony/package.py index 83e88e7d57..00824cedef 100644 --- a/server_addon/harmony/package.py +++ b/server_addon/harmony/package.py @@ -1,3 +1,3 @@ name = "harmony" title = "Harmony" -version = "0.1.2" +version = "0.1.3" diff --git a/server_addon/harmony/server/settings/main.py b/server_addon/harmony/server/settings/main.py index 9c780b63c2..8a72c966d8 100644 --- a/server_addon/harmony/server/settings/main.py +++ b/server_addon/harmony/server/settings/main.py @@ -45,11 +45,6 @@ DEFAULT_HARMONY_SETTING = { "optional": True, "active": True }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateSceneSettings": { "enabled": True, "optional": True, diff --git a/server_addon/harmony/server/settings/publish_plugins.py b/server_addon/harmony/server/settings/publish_plugins.py index c9e7c515e4..2d976389f6 100644 --- a/server_addon/harmony/server/settings/publish_plugins.py +++ b/server_addon/harmony/server/settings/publish_plugins.py @@ -18,14 +18,6 @@ class ValidateAudioPlugin(BaseSettingsModel): active: bool = SettingsField(True, title="Active") -class ValidateContainersPlugin(BaseSettingsModel): - """Check if loaded container is scene are latest 
versions.""" - _isGroup = True - enabled: bool = True - optional: bool = SettingsField(False, title="Optional") - active: bool = SettingsField(True, title="Active") - - class ValidateSceneSettingsPlugin(BaseSettingsModel): """Validate if FrameStart, FrameEnd and Resolution match shot data in DB. Use regular expressions to limit validations only on particular asset @@ -63,11 +55,6 @@ class HarmonyPublishPlugins(BaseSettingsModel): default_factory=ValidateAudioPlugin, ) - ValidateContainers: ValidateContainersPlugin = SettingsField( - title="Validate Containers", - default_factory=ValidateContainersPlugin, - ) - ValidateSceneSettings: ValidateSceneSettingsPlugin = SettingsField( title="Validate Scene Settings", default_factory=ValidateSceneSettingsPlugin, diff --git a/server_addon/hiero/package.py b/server_addon/hiero/package.py index cabe68eb68..54c2f74fa7 100644 --- a/server_addon/hiero/package.py +++ b/server_addon/hiero/package.py @@ -1,3 +1,3 @@ name = "hiero" title = "Hiero" -version = "0.1.2" +version = "0.1.3" diff --git a/server_addon/hiero/server/settings/imageio.py b/server_addon/hiero/server/settings/imageio.py index f2bc71ac33..9e15e15597 100644 --- a/server_addon/hiero/server/settings/imageio.py +++ b/server_addon/hiero/server/settings/imageio.py @@ -149,15 +149,15 @@ class ImageIOSettings(BaseSettingsModel): DEFAULT_IMAGEIO_SETTINGS = { "workfile": { - "ocioConfigName": "nuke-default", - "workingSpace": "linear", - "viewerLut": "sRGB", - "eightBitLut": "sRGB", - "sixteenBitLut": "sRGB", - "logLut": "Cineon", - "floatLut": "linear", - "thumbnailLut": "sRGB", - "monitorOutLut": "sRGB" + "ocioConfigName": "aces_1.2", + "workingSpace": "role_scene_linear", + "viewerLut": "ACES/sRGB", + "eightBitLut": "role_matte_paint", + "sixteenBitLut": "role_texture_paint", + "logLut": "role_compositing_log", + "floatLut": "role_scene_linear", + "thumbnailLut": "ACES/sRGB", + "monitorOutLut": "ACES/sRGB" }, "regexInputs": { "inputs": [ diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py index 4e441c76ae..06b034da38 100644 --- a/server_addon/houdini/package.py +++ b/server_addon/houdini/package.py @@ -1,3 +1,3 @@ name = "houdini" title = "Houdini" -version = "0.2.13" +version = "0.2.15" diff --git a/server_addon/houdini/server/settings/publish.py b/server_addon/houdini/server/settings/publish.py index 8e0e7f7795..4a0c022f23 100644 --- a/server_addon/houdini/server/settings/publish.py +++ b/server_addon/houdini/server/settings/publish.py @@ -1,4 +1,7 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField +) # Publish Plugins @@ -20,6 +23,27 @@ class CollectChunkSizeModel(BaseSettingsModel): title="Frames Per Task") +class AOVFilterSubmodel(BaseSettingsModel): + """You should use the same host name you are using for Houdini.""" + host_name: str = SettingsField("", title="Houdini Host name") + value: list[str] = SettingsField( + default_factory=list, + title="AOV regex" + ) + +class CollectLocalRenderInstancesModel(BaseSettingsModel): + + use_deadline_aov_filter: bool = SettingsField( + False, + title="Use Deadline AOV Filter" + ) + + aov_filter: AOVFilterSubmodel = SettingsField( + default_factory=AOVFilterSubmodel, + title="Reviewable products filter" + ) + + class ValidateWorkfilePathsModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") @@ -49,10 +73,10 @@ class PublishPluginsModel(BaseSettingsModel): 
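
# Aside: a minimal sketch of how the AOV regex filter introduced above in
# CollectLocalRenderInstancesModel could mark local render AOVs as
# reviewable; the AOV names in the usage comments are hypothetical.
import re

aov_filter = {"host_name": "houdini", "value": [".*([Bb]eauty).*"]}

def is_reviewable(aov_name, host_name):
    # Patterns only apply to the host they were configured for.
    if host_name != aov_filter["host_name"]:
        return False
    return any(re.match(pattern, aov_name) for pattern in aov_filter["value"])

# is_reviewable("karma_beauty", "houdini") -> True
# is_reviewable("karma_depth", "houdini") -> False
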
default_factory=CollectChunkSizeModel, title="Collect Chunk Size." ) - ValidateContainers: BasicValidateModel = SettingsField( - default_factory=BasicValidateModel, - title="Validate Latest Containers.", - section="Validators") + CollectLocalRenderInstances: CollectLocalRenderInstancesModel = SettingsField( + default_factory=CollectLocalRenderInstancesModel, + title="Collect Local Render Instances." + ) ValidateInstanceInContextHoudini: BasicValidateModel = SettingsField( default_factory=BasicValidateModel, title="Validate Instance is in same Context.") @@ -82,10 +106,14 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = { "optional": True, "chunk_size": 999999 }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True + "CollectLocalRenderInstances": { + "use_deadline_aov_filter": False, + "aov_filter" : { + "host_name": "houdini", + "value": [ + ".*([Bb]eauty).*" + ] + } }, "ValidateInstanceInContextHoudini": { "enabled": True, diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py index 00f28d901e..4537c23eaa 100644 --- a/server_addon/maya/package.py +++ b/server_addon/maya/package.py @@ -1,3 +1,3 @@ name = "maya" title = "Maya" -version = "0.1.16" +version = "0.1.20" diff --git a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py index 27288053a2..9c552e17fa 100644 --- a/server_addon/maya/server/settings/publishers.py +++ b/server_addon/maya/server/settings/publishers.py @@ -35,6 +35,50 @@ def angular_unit_enum(): ] +def extract_alembic_data_format_enum(): + return [ + {"label": "ogawa", "value": "ogawa"}, + {"label": "HDF", "value": "HDF"} + ] + + +def extract_alembic_overrides_enum(): + return [ + {"label": "Custom Attributes", "value": "attr"}, + {"label": "Custom Attributes Prefix", "value": "attrPrefix"}, + {"label": "Data Format", "value": "dataFormat"}, + {"label": "Euler Filter", "value": "eulerFilter"}, + {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"}, + {"label": "Mel Post Job Callback", "value": "melPostJobCallback"}, + {"label": "Pre Roll", "value": "preRoll"}, + {"label": "Pre Roll Start Frame", "value": "preRollStartFrame"}, + { + "label": "Python Per Frame Callback", + "value": "pythonPerFrameCallback" + }, + { + "label": "Python Post Job Callback", + "value": "pythonPostJobCallback" + }, + {"label": "Renderable Only", "value": "renderableOnly"}, + {"label": "Strip Namespaces", "value": "stripNamespaces"}, + {"label": "User Attr", "value": "userAttr"}, + {"label": "User Attr Prefix", "value": "userAttrPrefix"}, + {"label": "UV Write", "value": "uvWrite"}, + {"label": "UVs Only", "value": "uvsOnly"}, + {"label": "Verbose", "value": "verbose"}, + {"label": "Visible Only", "value": "visibleOnly"}, + {"label": "Whole Frame Geo", "value": "wholeFrameGeo"}, + {"label": "World Space", "value": "worldSpace"}, + {"label": "Write Color Sets", "value": "writeColorSets"}, + {"label": "Write Creases", "value": "writeCreases"}, + {"label": "Write Face Sets", "value": "writeFaceSets"}, + {"label": "Write Normals", "value": "writeNormals"}, + {"label": "Write UV Sets", "value": "writeUVSets"}, + {"label": "Write Visibility", "value": "writeVisibility"} + ] + + class BasicValidateModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") @@ -184,7 +228,7 @@ class ValidateAttributesModel(BaseSettingsModel): if not success: raise BadRequestException( - "The attibutes can't be parsed as json object" + "The attributes can't be 
parsed as json object" ) return value @@ -220,7 +264,7 @@ class ValidateUnrealStaticMeshNameModel(BaseSettingsModel): enabled: bool = SettingsField(title="ValidateUnrealStaticMeshName") optional: bool = SettingsField(title="Optional") validate_mesh: bool = SettingsField(title="Validate mesh names") - validate_collision: bool = SettingsField(title="Validate collison names") + validate_collision: bool = SettingsField(title="Validate collision names") class ValidateCycleErrorModel(BaseSettingsModel): @@ -243,7 +287,7 @@ class ValidatePluginPathAttributesModel(BaseSettingsModel): and the node attribute is abc_file """ - enabled: bool = True + enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") active: bool = SettingsField(title="Active") attribute: list[ValidatePluginPathAttributesAttrModel] = SettingsField( @@ -265,6 +309,9 @@ class RendererAttributesModel(BaseSettingsModel): class ValidateRenderSettingsModel(BaseSettingsModel): + enabled: bool = SettingsField(title="Enabled") + optional: bool = SettingsField(title="Optional") + active: bool = SettingsField(title="Active") arnold_render_attributes: list[RendererAttributesModel] = SettingsField( default_factory=list, title="Arnold Render Attributes") vray_render_attributes: list[RendererAttributesModel] = SettingsField( @@ -299,6 +346,108 @@ class ExtractAlembicModel(BaseSettingsModel): families: list[str] = SettingsField( default_factory=list, title="Families") + eulerFilter: bool = SettingsField( + title="Euler Filter", + description="Apply Euler filter while sampling rotations." + ) + renderableOnly: bool = SettingsField( + title="Renderable Only", + description="Only export renderable visible shapes." + ) + stripNamespaces: bool = SettingsField( + title="Strip Namespaces", + description=( + "Namespaces will be stripped off of the node before being written " + "to Alembic." + ) + ) + uvsOnly: bool = SettingsField( + title="UVs Only", + description=( + "If this flag is present, only uv data for PolyMesh and SubD " + "shapes will be written to the Alembic file." + ) + ) + uvWrite: bool = SettingsField( + title="UV Write", + description=( + "Uv data for PolyMesh and SubD shapes will be written to the " + "Alembic file." + ) + ) + verbose: bool = SettingsField( + title="Verbose", + description="Prints the current frame that is being evaluated." + ) + visibleOnly: bool = SettingsField( + title="Visible Only", + description="Only export dag objects visible during frame range." + ) + wholeFrameGeo: bool = SettingsField( + title="Whole Frame Geo", + description=( + "Data for geometry will only be written out on whole frames." + ) + ) + worldSpace: bool = SettingsField( + title="World Space", + description="Any root nodes will be stored in world space." + ) + writeColorSets: bool = SettingsField( + title="Write Color Sets", + description="Write vertex colors with the geometry." + ) + writeCreases: bool = SettingsField( + title="Write Creases", + description="Write the geometry's edge and vertex crease information." + ) + writeFaceSets: bool = SettingsField( + title="Write Face Sets", + description="Write face sets with the geometry." + ) + writeNormals: bool = SettingsField( + title="Write Normals", + description="Write normals with the deforming geometry." + ) + writeUVSets: bool = SettingsField( + title="Write UV Sets", + description=( + "Write all uv sets on MFnMeshes as vector 2 indexed geometry " + "parameters with face varying scope." 
+        )
+    )
+    writeVisibility: bool = SettingsField(
+        title="Write Visibility",
+        description=(
+            "Visibility state will be stored in the Alembic file. Otherwise "
+            "everything written out is treated as visible."
+        )
+    )
+    preRoll: bool = SettingsField(
+        title="Pre Roll",
+        description=(
+            "When enabled, the pre roll start frame is used to begin "
+            "the evaluation of the mesh. Frames from the pre roll "
+            "start frame to the alembic start frame will not be "
+            "written to disk. This can be used for simulation "
+            "run up."
+        )
+    )
+    preRollStartFrame: int = SettingsField(
+        title="Pre Roll Start Frame",
+        description=(
+            "The frame to start scene evaluation at. This is used to set the "
+            "starting frame for time dependent translations and can be used to"
+            " evaluate run-up that isn't actually translated.\n"
+            "NOTE: Pre Roll needs to be enabled for this start frame "
+            "to be considered."
+        )
+    )
+    dataFormat: str = SettingsField(
+        enum_resolver=extract_alembic_data_format_enum,
+        title="Data Format",
+        description="The data format to use to write the file."
+    )
     bake_attributes: list[str] = SettingsField(
         default_factory=list, title="Bake Attributes",
         description="List of attributes that will be included in the alembic "
@@ -309,6 +458,73 @@ class ExtractAlembicModel(BaseSettingsModel):
         description="List of attribute prefixes for attributes that will be "
                     "included in the alembic export.",
     )
+    attr: str = SettingsField(
+        title="Custom Attributes",
+        placeholder="attr1;attr2",
+        description=(
+            "Attributes matching by name will be included in the Alembic "
+            "export. Attributes should be separated by semi-colon `;`"
+        )
+    )
+    attrPrefix: str = SettingsField(
+        title="Custom Attributes Prefix",
+        placeholder="prefix1;prefix2",
+        description=(
+            "Attributes starting with these prefixes will be included in the "
+            "Alembic export. Attributes should be separated by semi-colon `;`"
+        )
+    )
+    userAttr: str = SettingsField(
+        title="User Attr",
+        placeholder="attr1;attr2",
+        description=(
+            "Attributes matching by name will be included in the Alembic "
+            "export. Attributes should be separated by semi-colon `;`"
+        )
+    )
+    userAttrPrefix: str = SettingsField(
+        title="User Attr Prefix",
+        placeholder="prefix1;prefix2",
+        description=(
+            "Attributes starting with these prefixes will be included in the "
+            "Alembic export. Attributes should be separated by semi-colon `;`"
+        )
+    )
+    melPerFrameCallback: str = SettingsField(
+        title="Mel Per Frame Callback",
+        description=(
+            "When each frame (and the static frame) is evaluated the string "
+            "specified is evaluated as a Mel command."
+        )
+    )
+    melPostJobCallback: str = SettingsField(
+        title="Mel Post Job Callback",
+        description=(
+            "When the translation has finished the string specified is "
+            "evaluated as a Mel command."
+        )
+    )
+    pythonPerFrameCallback: str = SettingsField(
+        title="Python Per Frame Callback",
+        description=(
+            "When each frame (and the static frame) is evaluated the string "
+            "specified is evaluated as a python command."
+        )
+    )
+    pythonPostJobCallback: str = SettingsField(
+        title="Python Post Job Callback",
+        description=(
+            "When the translation has finished the string specified is "
+            "evaluated as a python command."
+        )
+    )
+    overrides: list[str] = SettingsField(
+        enum_resolver=extract_alembic_overrides_enum,
+        title="Exposed Overrides",
+        description=(
+            "Expose the attributes in this list to the user when publishing."
+        )
+    )
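
# Aside: a minimal sketch of what the "Exposed Overrides" enum above implies
# for publishing: only the ExtractAlembic settings named in "overrides" are
# surfaced to the artist as editable attributes, while the rest keep the
# studio default. The settings dict is a hypothetical, trimmed example.
plugin_settings = {
    "worldSpace": True,
    "writeNormals": True,
    "renderableOnly": False,
    "overrides": ["worldSpace", "writeNormals"],
}

def exposed_attributes(settings):
    # Anything outside "overrides" stays hidden and fixed.
    return {key: settings[key] for key in settings["overrides"]}

# exposed_attributes(plugin_settings)
# -> {"worldSpace": True, "writeNormals": True}
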
 
 
 class ExtractObjModel(BaseSettingsModel):
@@ -392,7 +608,7 @@ class ExtractGPUCacheModel(BaseSettingsModel):
         title="Optimize Animations For Motion Blur"
     )
     writeMaterials: bool = SettingsField(title="Write Materials")
-    useBaseTessellation: bool = SettingsField(title="User Base Tesselation")
+    useBaseTessellation: bool = SettingsField(title="Use Base Tessellation")
 
 
 class PublishersModel(BaseSettingsModel):
@@ -418,10 +634,6 @@ class PublishersModel(BaseSettingsModel):
         title="Validate Instance In Context",
         section="Validators"
     )
-    ValidateContainers: BasicValidateModel = SettingsField(
-        default_factory=BasicValidateModel,
-        title="Validate Containers"
-    )
     ValidateFrameRange: ValidateFrameRangeModel = SettingsField(
         default_factory=ValidateFrameRangeModel,
         title="Validate Frame Range"
@@ -668,15 +880,19 @@ class PublishersModel(BaseSettingsModel):
         default_factory=BasicValidateModel,
         title="Validate Alembic Visible Node",
     )
+    ValidateAlembicDefaultsPointcache: BasicValidateModel = SettingsField(
+        default_factory=BasicValidateModel,
+        title="Validate Alembic Defaults Pointcache"
+    )
+    ValidateAlembicDefaultsAnimation: BasicValidateModel = SettingsField(
+        default_factory=BasicValidateModel,
+        title="Validate Alembic Defaults Animation"
+    )
     ExtractProxyAlembic: ExtractProxyAlembicModel = SettingsField(
         default_factory=ExtractProxyAlembicModel,
         title="Extract Proxy Alembic",
         section="Model Extractors",
     )
-    ExtractAlembic: ExtractAlembicModel = SettingsField(
-        default_factory=ExtractAlembicModel,
-        title="Extract Alembic",
-    )
     ExtractObj: ExtractObjModel = SettingsField(
         default_factory=ExtractObjModel,
         title="Extract OBJ"
@@ -697,10 +913,6 @@ class PublishersModel(BaseSettingsModel):
         default_factory=BasicValidateModel,
         title="Validate Rig Controllers",
     )
-    ValidateAnimatedReferenceRig: BasicValidateModel = SettingsField(
-        default_factory=BasicValidateModel,
-        title="Validate Animated Reference Rig",
-    )
     ValidateAnimationContent: BasicValidateModel = SettingsField(
         default_factory=BasicValidateModel,
         title="Validate Animation Content",
@@ -811,6 +1023,10 @@ class PublishersModel(BaseSettingsModel):
         default_factory=ExtractModelModel,
         title="Extract Model (Maya Scene)"
     )
+    ExtractAlembic: ExtractAlembicModel = SettingsField(
+        default_factory=ExtractAlembicModel,
+        title="Extract Alembic"
+    )
 
 
 DEFAULT_SUFFIX_NAMING = {
@@ -839,11 +1055,6 @@ DEFAULT_PUBLISH_SETTINGS = {
         "optional": True,
         "active": True
     },
-    "ValidateContainers": {
-        "enabled": True,
-        "optional": True,
-        "active": True
-    },
     "ValidateFrameRange": {
         "enabled": True,
         "optional": True,
@@ -942,6 +1153,9 @@ DEFAULT_PUBLISH_SETTINGS = {
         ]
     },
     "ValidateRenderSettings": {
+        "enabled": True,
+        "active": True,
+        "optional": False,
         "arnold_render_attributes": [],
         "vray_render_attributes": [],
         "redshift_render_attributes": [],
@@ -1200,16 +1414,6 @@ DEFAULT_PUBLISH_SETTINGS = {
             "proxyAbc"
         ]
     },
-    "ExtractAlembic": {
-        "enabled": True,
-        "families": [
-            "pointcache",
-            "model",
-            "vrayproxy.alembic"
-        ],
-        "bake_attributes": [],
-        "bake_attribute_prefixes": []
-    },
     "ExtractObj": {
         "enabled": False,
         "optional": True,
@@ -1230,11 +1434,6 @@ DEFAULT_PUBLISH_SETTINGS = {
         "optional": True,
         "active": True
     },
-    "ValidateAnimatedReferenceRig": {
-        "enabled": True,
-        "optional": False,
-        "active": True
-    },
     "ValidateAnimationContent": {
         "enabled": True,
         "optional": False,
@@ -1330,6 +1529,16 @@ DEFAULT_PUBLISH_SETTINGS = {
         "optional": False,
         "validate_shapes": True
     },
+    "ValidateAlembicDefaultsPointcache": {
+        "enabled": True,
+        "optional":
True, + "active": True + }, + "ValidateAlembicDefaultsAnimation": { + "enabled": True, + "optional": True, + "active": True + }, "ExtractPlayblast": DEFAULT_PLAYBLAST_SETTING, "ExtractMayaSceneRaw": { "enabled": True, @@ -1371,6 +1580,52 @@ DEFAULT_PUBLISH_SETTINGS = { "ExtractModel": { "enabled": True, "optional": True, - "active": True, + "active": True + }, + "ExtractAlembic": { + "enabled": True, + "families": [ + "pointcache", + "model", + "vrayproxy.alembic" + ], + "attr": "", + "attrPrefix": "", + "bake_attributes": [], + "bake_attribute_prefixes": [], + "dataFormat": "ogawa", + "eulerFilter": False, + "melPerFrameCallback": "", + "melPostJobCallback": "", + "overrides": [ + "attr", + "attrPrefix", + "renderableOnly", + "visibleOnly", + "worldSpace", + "writeColorSets", + "writeFaceSets", + "writeNormals" + ], + "preRoll": False, + "preRollStartFrame": 0, + "pythonPerFrameCallback": "", + "pythonPostJobCallback": "", + "renderableOnly": False, + "stripNamespaces": True, + "uvsOnly": False, + "uvWrite": True, + "userAttr": "", + "userAttrPrefix": "", + "verbose": False, + "visibleOnly": False, + "wholeFrameGeo": False, + "worldSpace": True, + "writeColorSets": False, + "writeCreases": False, + "writeFaceSets": False, + "writeNormals": True, + "writeUVSets": False, + "writeVisibility": False } } diff --git a/server_addon/nuke/package.py b/server_addon/nuke/package.py index 9630c370bc..bc166bd14e 100644 --- a/server_addon/nuke/package.py +++ b/server_addon/nuke/package.py @@ -1,3 +1,3 @@ name = "nuke" title = "Nuke" -version = "0.1.10" +version = "0.1.13" diff --git a/server_addon/nuke/server/settings/imageio.py b/server_addon/nuke/server/settings/imageio.py index 1b84457133..9cdb0bf1d7 100644 --- a/server_addon/nuke/server/settings/imageio.py +++ b/server_addon/nuke/server/settings/imageio.py @@ -97,8 +97,23 @@ class WorkfileColorspaceSettings(BaseSettingsModel): working_space: str = SettingsField( title="Working Space" ) - thumbnail_space: str = SettingsField( - title="Thumbnail Space" + monitor_lut: str = SettingsField( + title="Thumbnails" + ) + monitor_out_lut: str = SettingsField( + title="Monitor Out" + ) + int_8_lut: str = SettingsField( + title="8-bit Files" + ) + int_16_lut: str = SettingsField( + title="16-bit Files" + ) + log_lut: str = SettingsField( + title="Log Files" + ) + float_lut: str = SettingsField( + title="Float Files" ) @@ -120,6 +135,9 @@ class ViewProcessModel(BaseSettingsModel): viewerProcess: str = SettingsField( title="Viewer Process Name" ) + output_transform: str = SettingsField( + title="Output Transform" + ) class ImageIOConfigModel(BaseSettingsModel): @@ -214,16 +232,23 @@ class ImageIOSettings(BaseSettingsModel): DEFAULT_IMAGEIO_SETTINGS = { "viewer": { - "viewerProcess": "sRGB (default)" + "viewerProcess": "ACES/sRGB", + "output_transform": "ACES/sRGB" }, "baking": { - "viewerProcess": "rec709 (default)" + "viewerProcess": "ACES/Rec.709", + "output_transform": "ACES/Rec.709" }, "workfile": { "color_management": "OCIO", - "native_ocio_config": "nuke-default", - "working_space": "scene_linear", - "thumbnail_space": "sRGB (default)", + "native_ocio_config": "aces_1.2", + "working_space": "role_scene_linear", + "monitor_lut": "ACES/sRGB", + "monitor_out_lut": "ACES/sRGB", + "int_8_lut": "role_matte_paint", + "int_16_lut": "role_texture_paint", + "log_lut": "role_compositing_log", + "float_lut": "role_scene_linear" }, "nodes": { "required_nodes": [ diff --git a/server_addon/nuke/server/settings/publish_plugins.py 
b/server_addon/nuke/server/settings/publish_plugins.py index d5b05d8715..6c37ecd37a 100644 --- a/server_addon/nuke/server/settings/publish_plugins.py +++ b/server_addon/nuke/server/settings/publish_plugins.py @@ -125,6 +125,7 @@ class ReformatNodesConfigModel(BaseSettingsModel): class IntermediateOutputModel(BaseSettingsModel): name: str = SettingsField(title="Output name") + publish: bool = SettingsField(title="Publish") filter: BakingStreamFilterModel = SettingsField( title="Filter", default_factory=BakingStreamFilterModel) read_raw: bool = SettingsField( @@ -230,10 +231,6 @@ class PublishPluginsModel(BaseSettingsModel): default_factory=OptionalPluginModel, section="Validators" ) - ValidateContainers: OptionalPluginModel = SettingsField( - title="Validate Containers", - default_factory=OptionalPluginModel - ) ValidateKnobs: ValidateKnobsModel = SettingsField( title="Validate Knobs", default_factory=ValidateKnobsModel @@ -299,11 +296,6 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "optional": True, "active": True }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateKnobs": { "enabled": False, "knobs": "\n".join([ @@ -346,6 +338,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "outputs": [ { "name": "baking", + "publish": False, "filter": { "task_types": [], "product_types": [], @@ -401,6 +394,7 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = { "outputs": [ { "name": "baking", + "publish": False, "filter": { "task_types": [], "product_types": [], diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py index 25615529d1..22043f951c 100644 --- a/server_addon/photoshop/package.py +++ b/server_addon/photoshop/package.py @@ -1,3 +1,3 @@ name = "photoshop" title = "Photoshop" -version = "0.1.2" +version = "0.1.3" diff --git a/server_addon/photoshop/server/settings/publish_plugins.py b/server_addon/photoshop/server/settings/publish_plugins.py index d04faaf53a..149b08beb4 100644 --- a/server_addon/photoshop/server/settings/publish_plugins.py +++ b/server_addon/photoshop/server/settings/publish_plugins.py @@ -83,14 +83,6 @@ class CollectVersionPlugin(BaseSettingsModel): enabled: bool = SettingsField(True, title="Enabled") -class ValidateContainersPlugin(BaseSettingsModel): - """Check that workfile contains latest version of loaded items""" # noqa - _isGroup = True - enabled: bool = True - optional: bool = SettingsField(False, title="Optional") - active: bool = SettingsField(True, title="Active") - - class ValidateNamingPlugin(BaseSettingsModel): """Validate naming of products and layers""" # noqa invalid_chars: str = SettingsField( @@ -154,11 +146,6 @@ class PhotoshopPublishPlugins(BaseSettingsModel): default_factory=CollectVersionPlugin, ) - ValidateContainers: ValidateContainersPlugin = SettingsField( - title="Validate Containers", - default_factory=ValidateContainersPlugin, - ) - ValidateNaming: ValidateNamingPlugin = SettingsField( title="Validate naming of products and layers", default_factory=ValidateNamingPlugin, @@ -187,11 +174,6 @@ DEFAULT_PUBLISH_SETTINGS = { "CollectVersion": { "enabled": False }, - "ValidateContainers": { - "enabled": True, - "optional": True, - "active": True - }, "ValidateNaming": { "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", "replace_char": "_" diff --git a/server_addon/traypublisher/package.py b/server_addon/traypublisher/package.py index 4ca8ae9fd3..c138a2296d 100644 --- a/server_addon/traypublisher/package.py +++ b/server_addon/traypublisher/package.py @@ -1,3 +1,3 @@ name = "traypublisher" title = 
"TrayPublisher" -version = "0.1.4" +version = "0.1.5" diff --git a/server_addon/traypublisher/server/settings/publish_plugins.py b/server_addon/traypublisher/server/settings/publish_plugins.py index f413c86227..99a0bbf107 100644 --- a/server_addon/traypublisher/server/settings/publish_plugins.py +++ b/server_addon/traypublisher/server/settings/publish_plugins.py @@ -1,4 +1,7 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, +) class ValidatePluginModel(BaseSettingsModel): @@ -14,6 +17,45 @@ class ValidateFrameRangeModel(ValidatePluginModel): 'my_asset_to_publish.mov')""" +class ExtractEditorialPckgFFmpegModel(BaseSettingsModel): + video_filters: list[str] = SettingsField( + default_factory=list, + title="Video filters" + ) + audio_filters: list[str] = SettingsField( + default_factory=list, + title="Audio filters" + ) + input: list[str] = SettingsField( + default_factory=list, + title="Input arguments" + ) + output: list[str] = SettingsField( + default_factory=list, + title="Output arguments" + ) + + +class ExtractEditorialPckgOutputDefModel(BaseSettingsModel): + _layout = "expanded" + ext: str = SettingsField("", title="Output extension") + + ffmpeg_args: ExtractEditorialPckgFFmpegModel = SettingsField( + default_factory=ExtractEditorialPckgFFmpegModel, + title="FFmpeg arguments" + ) + + +class ExtractEditorialPckgConversionModel(BaseSettingsModel): + """Set output definition if resource files should be converted.""" + conversion_enabled: bool = SettingsField(True, + title="Conversion enabled") + output: ExtractEditorialPckgOutputDefModel = SettingsField( + default_factory=ExtractEditorialPckgOutputDefModel, + title="Output Definitions", + ) + + class TrayPublisherPublishPlugins(BaseSettingsModel): CollectFrameDataFromAssetEntity: ValidatePluginModel = SettingsField( default_factory=ValidatePluginModel, @@ -28,6 +70,13 @@ class TrayPublisherPublishPlugins(BaseSettingsModel): default_factory=ValidatePluginModel, ) + ExtractEditorialPckgConversion: ExtractEditorialPckgConversionModel = ( + SettingsField( + default_factory=ExtractEditorialPckgConversionModel, + title="Extract Editorial Package Conversion" + ) + ) + DEFAULT_PUBLISH_PLUGINS = { "CollectFrameDataFromAssetEntity": { @@ -44,5 +93,24 @@ DEFAULT_PUBLISH_PLUGINS = { "enabled": True, "optional": True, "active": True + }, + "ExtractEditorialPckgConversion": { + "optional": False, + "conversion_enabled": True, + "output": { + "ext": "", + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [ + "-apply_trc gamma22" + ], + "output": [ + "-pix_fmt yuv420p", + "-crf 18", + "-intra" + ] + } + } } }