diff --git a/client/ayon_core/hosts/aftereffects/api/pipeline.py b/client/ayon_core/hosts/aftereffects/api/pipeline.py index 105fee64b9..2239040f09 100644 --- a/client/ayon_core/hosts/aftereffects/api/pipeline.py +++ b/client/ayon_core/hosts/aftereffects/api/pipeline.py @@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, + register_workfile_build_plugin_path, AVALON_CONTAINER_ID, AVALON_INSTANCE_ID, AYON_INSTANCE_ID, ) -from ayon_core.hosts.aftereffects.api.workfile_template_builder import ( - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin -) from ayon_core.pipeline.load import any_outdated_containers import ayon_core.hosts.aftereffects @@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): @@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) register_event_callback("application.launched", application_launch) @@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): item["id"] = "publish_context" self.stub.imprint(item["id"], item) - def get_workfile_build_placeholder_plugins(self): - return [ - AEPlaceholderLoadPlugin, - AEPlaceholderCreatePlugin - ] - # created instances section def list_instances(self): """List all created instances from current workfile which diff --git a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py index aa2f36e8aa..99d5bbb938 100644 --- a/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/aftereffects/api/workfile_template_builder.py @@ -1,6 +1,7 @@ import os.path import uuid import shutil +from abc import abstractmethod from ayon_core.pipeline import registered_host from ayon_core.tools.workfile_template_build import ( @@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import ( from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin + PlaceholderItem ) from ayon_core.hosts.aftereffects.api import get_stub -from ayon_core.hosts.aftereffects.api.lib import set_settings PLACEHOLDER_SET = "PLACEHOLDERS_SET" PLACEHOLDER_ID = "openpype.placeholder" @@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder): class AEPlaceholderPlugin(PlaceholderPlugin): """Contains generic methods for all PlaceholderPlugins.""" + @abstractmethod + def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem: + pass + def collect_placeholders(self): """Collect info from file metadata about created placeholders. 
@@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin): if item.get("plugin_identifier") != self.identifier: continue - if isinstance(self, AEPlaceholderLoadPlugin): - item = LoadPlaceholderItem(item["uuid"], - item["data"], - self) - elif isinstance(self, AEPlaceholderCreatePlugin): - item = CreatePlaceholderItem(item["uuid"], - item["data"], - self) - else: - raise NotImplementedError(f"Not implemented for {type(self)}") - + item = self._create_placeholder_item(item) output.append(item) return output @@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin): stub.imprint(item_id, container_data) -class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin): - """Adds Create placeholder. - - This adds composition and runs Create - """ - identifier = "aftereffects.create" - label = "AfterEffects create" - - def create_placeholder(self, placeholder_data): - stub = get_stub() - name = "CREATEPLACEHOLDER" - item_id = stub.add_item(name, "COMP") - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Replace 'placeholder' with publishable instance. - - Renames prepared composition name, creates publishable instance, sets - frame/duration settings according to DB. - """ - pre_create_data = {"use_selection": True} - item_id, item = self._get_item(placeholder) - get_stub().select_items([item_id]) - self.populate_create_placeholder(placeholder, pre_create_data) - - # apply settings for populated composition - item_id, metadata_item = self._get_item(placeholder) - set_settings(True, True, [item_id]) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - -class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin): - identifier = "aftereffects.load" - label = "AfterEffects load" - - def create_placeholder(self, placeholder_data): - """Creates AE's Placeholder item in Project items list. - - Sets dummy resolution/duration/fps settings, will be replaced when - populated. - """ - stub = get_stub() - name = "LOADERPLACEHOLDER" - item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) - - self._imprint_item(item_id, name, placeholder_data, stub) - - def populate_placeholder(self, placeholder): - """Use Openpype Loader from `placeholder` to create new FootageItems - - New FootageItems are created, files are imported. 
- """ - self.populate_load_placeholder(placeholder) - errors = placeholder.get_errors() - stub = get_stub() - if errors: - stub.print_msg("\n".join(errors)) - else: - if not placeholder.data["keep_placeholder"]: - metadata = stub.get_metadata() - for item in metadata: - if not item.get("is_placeholder"): - continue - scene_identifier = item.get("uuid") - if (scene_identifier and - scene_identifier == placeholder.scene_identifier): - stub.delete_item(item["members"][0]) - stub.remove_instance(placeholder.scene_identifier, metadata) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def load_succeed(self, placeholder, container): - placeholder_item_id, _ = self._get_item(placeholder) - item_id = container.id - get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) - - def build_workfile_template(*args, **kwargs): builder = AETemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py index c28042b6ae..ebd4b8f944 100644 --- a/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/aftereffects/plugins/publish/collect_render.py @@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance): class CollectAERender(publish.AbstractCollectRender): - order = pyblish.api.CollectorOrder + 0.405 + order = pyblish.api.CollectorOrder + 0.100 label = "Collect After Effects Render Layers" hosts = ["aftereffects"] @@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender): if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..c7927f176f --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,49 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +from ayon_core.hosts.aftereffects.api.lib import set_settings +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin, + PlaceholderCreateMixin): + """Adds Create placeholder. + + This adds composition and runs Create + """ + identifier = "aftereffects.create" + label = "AfterEffects create" + + def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem: + return CreatePlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + stub = get_stub() + name = "CREATEPLACEHOLDER" + item_id = stub.add_item(name, "COMP") + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Replace 'placeholder' with publishable instance. + + Renames prepared composition name, creates publishable instance, sets + frame/duration settings according to DB. 
+ """ + pre_create_data = {"use_selection": True} + item_id, item = self._get_item(placeholder) + get_stub().select_items([item_id]) + self.populate_create_placeholder(placeholder, pre_create_data) + + # apply settings for populated composition + item_id, metadata_item = self._get_item(placeholder) + set_settings(True, True, [item_id]) + + def get_placeholder_options(self, options=None): + return self.get_create_plugin_options(options) diff --git a/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..7f7e4f49ce --- /dev/null +++ b/client/ayon_core/hosts/aftereffects/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,60 @@ +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin +) +from ayon_core.hosts.aftereffects.api import get_stub +import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb + + +class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "aftereffects.load" + label = "AfterEffects load" + + def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem: + return LoadPlaceholderItem( + scene_identifier=item_data["uuid"], + data=item_data["data"], + plugin=self + ) + + def create_placeholder(self, placeholder_data): + """Creates AE's Placeholder item in Project items list. + + Sets dummy resolution/duration/fps settings, will be replaced when + populated. + """ + stub = get_stub() + name = "LOADERPLACEHOLDER" + item_id = stub.add_placeholder(name, 1920, 1060, 25, 10) + + self._imprint_item(item_id, name, placeholder_data, stub) + + def populate_placeholder(self, placeholder): + """Use Openpype Loader from `placeholder` to create new FootageItems + + New FootageItems are created, files are imported. + """ + self.populate_load_placeholder(placeholder) + errors = placeholder.get_errors() + stub = get_stub() + if errors: + stub.print_msg("\n".join(errors)) + else: + if not placeholder.data["keep_placeholder"]: + metadata = stub.get_metadata() + for item in metadata: + if not item.get("is_placeholder"): + continue + scene_identifier = item.get("uuid") + if (scene_identifier and + scene_identifier == placeholder.scene_identifier): + stub.delete_item(item["members"][0]) + stub.remove_instance(placeholder.scene_identifier, metadata) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + placeholder_item_id, _ = self._get_item(placeholder) + item_id = container.id + get_stub().add_item_instead_placeholder(placeholder_item_id, item_id) diff --git a/client/ayon_core/hosts/blender/api/lib.py b/client/ayon_core/hosts/blender/api/lib.py index 458a275b51..32137f0fcd 100644 --- a/client/ayon_core/hosts/blender/api/lib.py +++ b/client/ayon_core/hosts/blender/api/lib.py @@ -33,7 +33,7 @@ def load_scripts(paths): if register: try: register() - except: + except: # noqa E722 traceback.print_exc() else: print("\nWarning! 
'%s' has no register function, " @@ -45,7 +45,7 @@ def load_scripts(paths): if unregister: try: unregister() - except: + except: # noqa E722 traceback.print_exc() def test_reload(mod): @@ -57,7 +57,7 @@ def load_scripts(paths): try: return importlib.reload(mod) - except: + except: # noqa E722 traceback.print_exc() def test_register(mod): diff --git a/client/ayon_core/hosts/blender/api/plugin.py b/client/ayon_core/hosts/blender/api/plugin.py index 6c9bfb6569..4a13d16805 100644 --- a/client/ayon_core/hosts/blender/api/plugin.py +++ b/client/ayon_core/hosts/blender/api/plugin.py @@ -143,13 +143,19 @@ def deselect_all(): if obj.mode != 'OBJECT': modes.append((obj, obj.mode)) bpy.context.view_layer.objects.active = obj - bpy.ops.object.mode_set(mode='OBJECT') + context_override = create_blender_context(active=obj) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode='OBJECT') - bpy.ops.object.select_all(action='DESELECT') + context_override = create_blender_context() + with bpy.context.temp_override(**context_override): + bpy.ops.object.select_all(action='DESELECT') for p in modes: bpy.context.view_layer.objects.active = p[0] - bpy.ops.object.mode_set(mode=p[1]) + context_override = create_blender_context(active=p[0]) + with bpy.context.temp_override(**context_override): + bpy.ops.object.mode_set(mode=p[1]) bpy.context.view_layer.objects.active = active diff --git a/client/ayon_core/hosts/fusion/api/lib.py b/client/ayon_core/hosts/fusion/api/lib.py index 08722463e1..7f7d20010d 100644 --- a/client/ayon_core/hosts/fusion/api/lib.py +++ b/client/ayon_core/hosts/fusion/api/lib.py @@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False): def _on_repair(): attributes = dict() for key, comp_key, _label in validations: - value = folder_value[key] + value = folder_attributes[key] comp_key_full = "Comp.FrameFormat.{}".format(comp_key) attributes[comp_key_full] = value comp.SetPrefs(attributes) diff --git a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py index 7a2844d5db..95d029aad4 100644 --- a/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py +++ b/client/ayon_core/hosts/fusion/plugins/publish/collect_render.py @@ -115,6 +115,7 @@ class CollectFusionRender( if "review" in instance.families: # to skip ExtractReview locally instance.families.remove("review") + instance.deadline = inst.data.get("deadline") instances.append(instance) diff --git a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py index 156e2ac6ba..c63eb114e5 100644 --- a/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py +++ b/client/ayon_core/hosts/harmony/plugins/publish/collect_farm_render.py @@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender): outputFormat=info[1], outputStartFrame=info[3], leadingZeros=info[2], - ignoreFrameHandleCheck=True + ignoreFrameHandleCheck=True, + #todo: inst is not available, must be determined, fix when + #reworking to Publisher + # deadline=inst.data.get("deadline") ) render_instance.context = context diff --git a/client/ayon_core/hosts/hiero/api/events.py b/client/ayon_core/hosts/hiero/api/events.py index 304605e24e..663004abd2 100644 --- a/client/ayon_core/hosts/hiero/api/events.py +++ b/client/ayon_core/hosts/hiero/api/events.py @@ -8,6 +8,7 @@ from .lib import ( sync_avalon_data_to_workfile, launch_workfiles_app, 
    before_project_save,
+    apply_colorspace_project
 )
 from .tags import add_tags_to_workfile
 from .menu import update_menu_task_label
@@ -44,6 +45,8 @@ def afterNewProjectCreated(event):
     # reset workfiles startup not to open any more in session
     os.environ["WORKFILES_STARTUP"] = "0"
 
+    apply_colorspace_project()
+
 
 def beforeProjectLoad(event):
     log.info("before project load event...")
@@ -122,6 +125,7 @@ def register_hiero_events():
     except RuntimeError:
         pass
 
+
 def register_events():
     """
     Adding all callbacks.
diff --git a/client/ayon_core/hosts/hiero/api/lib.py b/client/ayon_core/hosts/hiero/api/lib.py
index 8682ff7780..aaf99546c7 100644
--- a/client/ayon_core/hosts/hiero/api/lib.py
+++ b/client/ayon_core/hosts/hiero/api/lib.py
@@ -11,7 +11,6 @@ import warnings
 import json
 import ast
 import secrets
-import shutil
 
 import hiero
 from qtpy import QtWidgets, QtCore
@@ -36,9 +35,6 @@ from .constants import (
     DEFAULT_SEQUENCE_NAME,
     DEFAULT_BIN_NAME
 )
-from ayon_core.pipeline.colorspace import (
-    get_imageio_config
-)
 
 
 class _CTX:
@@ -105,9 +101,9 @@ def flatten(list_):
 
 
 def get_current_project(remove_untitled=False):
-    projects = flatten(hiero.core.projects())
+    projects = hiero.core.projects()
     if not remove_untitled:
-        return next(iter(projects))
+        return projects[0]
 
     # if remove_untitled
     for proj in projects:
@@ -1050,18 +1046,68 @@ def _set_hrox_project_knobs(doc, **knobs):
 
 
 def apply_colorspace_project():
-    project_name = get_current_project_name()
-    # get path the the active projects
-    project = get_current_project(remove_untitled=True)
-    current_file = project.path()
-
-    # close the active project
-    project.close()
+    """Apply colorspaces from settings.
 
+    Due to not being able to set the project settings through the Python API,
+    we need to use some dubious code to find the widgets and set them. It is
+    possible to set the project settings without traversing through the
+    widgets, but that involves reading the hrox files from disk with XML, so
+    there is no in-memory support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python  # noqa
+    for more details.
+    """
     # get presets for hiero
+    project_name = get_current_project_name()
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
     presets = imageio.get("workfile")
 
+    # Open Project Settings UI.
+    for act in hiero.ui.registeredActions():
+        if act.objectName() == "foundry.project.settings":
+            act.trigger()
+
+    # Find widgets from their sibling label.
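+    # Each combo box sits right after its QLabel in the dialog's widget
+    # hierarchy, so map the label text to a settings key and pick the
+    # label's next sibling widget while traversing.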
+    labels = {
+        "Working Space:": "workingSpace",
+        "Viewer:": "viewerLut",
+        "Thumbnails:": "thumbnailLut",
+        "Monitor Out:": "monitorOutLut",
+        "8 Bit Files:": "eightBitLut",
+        "16 Bit Files:": "sixteenBitLut",
+        "Log Files:": "logLut",
+        "Floating Point Files:": "floatLut"
+    }
+    widgets = {x: None for x in labels.values()}
+
+    def _recursive_children(widget, labels, widgets):
+        children = widget.children()
+        for count, child in enumerate(children):
+            if isinstance(child, QtWidgets.QLabel):
+                if child.text() in labels.keys():
+                    widgets[labels[child.text()]] = children[count + 1]
+            _recursive_children(child, labels, widgets)
+
+    app = QtWidgets.QApplication.instance()
+    title = "Project Settings"
+    for widget in app.topLevelWidgets():
+        if isinstance(widget, QtWidgets.QMainWindow):
+            if widget.windowTitle() != title:
+                continue
+            _recursive_children(widget, labels, widgets)
+            widget.close()
+
+    msg = "Setting value \"{}\" is not a valid option for \"{}\""
+    for key, widget in widgets.items():
+        options = [widget.itemText(i) for i in range(widget.count())]
+        setting_value = presets[key]
+        assert setting_value in options, msg.format(setting_value, key)
+        widget.setCurrentText(presets[key])
+
+    # This code block is for setting up project colorspaces for files on disk.
+    # Due to not having Python API access to set the project settings, the
+    # Foundry recommended way is to modify the hrox files on disk with XML. See
+    # this forum thread for more details:
+    # https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python  # noqa
+    '''
     # backward compatibility layer
     # TODO: remove this after some time
     config_data = get_imageio_config(
@@ -1074,6 +1120,13 @@
             "ocioConfigName": "custom"
         })
 
+    # get the path to the active project
+    project = get_current_project()
+    current_file = project.path()
+
+    msg = "The project needs to be saved to disk to apply colorspace settings."
+    assert current_file, msg
+
     # save the workfile as subversion "comment:_colorspaceChange"
     split_current_file = os.path.splitext(current_file)
     copy_current_file = current_file
@@ -1116,6 +1169,7 @@
 
     # open the file as current project
     hiero.core.openProject(copy_current_file)
+    '''
 
 
 def apply_colorspace_clips():
@@ -1125,10 +1179,8 @@
 
     # get presets for hiero
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
 
-    from pprint import pprint
     presets = imageio.get("regexInputs", {}).get("inputs", {})
-    pprint(presets)
     for clip in clips:
         clip_media_source_path = clip.mediaSource().firstpath()
         clip_name = clip.name()
diff --git a/client/ayon_core/hosts/hiero/api/tags.py b/client/ayon_core/hosts/hiero/api/tags.py
index 5abfee75d0..d4acb23493 100644
--- a/client/ayon_core/hosts/hiero/api/tags.py
+++ b/client/ayon_core/hosts/hiero/api/tags.py
@@ -144,7 +144,7 @@ def add_tags_to_workfile():
     # Get project task types.
     project_name = get_current_project_name()
     project_entity = ayon_api.get_project(project_name)
-    task_types = project_entity["taskType"]
+    task_types = project_entity["taskTypes"]
     nks_pres_tags["[Tasks]"] = {}
     log.debug("__ tasks: {}".format(task_types))
     for task_type in task_types:
diff --git a/client/ayon_core/hosts/hiero/api/workio.py b/client/ayon_core/hosts/hiero/api/workio.py
index 4c2416ca38..6e8fc20172 100644
--- a/client/ayon_core/hosts/hiero/api/workio.py
+++ b/client/ayon_core/hosts/hiero/api/workio.py
@@ -51,13 +51,12 @@ def open_file(filepath):
 
     project = hiero.core.projects()[-1]
 
-    # open project file
-    hiero.core.openProject(filepath.replace(os.path.sep, "/"))
-
-    # close previous project
-    project.close()
-
-
+    # Close the previous project if it's different from the given file.
+    filepath = filepath.replace(os.path.sep, "/")
+    if project.path().replace(os.path.sep, "/") != filepath:
+        # open project file
+        hiero.core.openProject(filepath)
+        project.close()
     return True
diff --git a/client/ayon_core/hosts/houdini/api/lib.py b/client/ayon_core/hosts/houdini/api/lib.py
index da1b21ad95..7ca8f7f8f0 100644
--- a/client/ayon_core/hosts/houdini/api/lib.py
+++ b/client/ayon_core/hosts/houdini/api/lib.py
@@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
     return template_data
 
 
+def set_review_color_space(opengl_node, review_color_space="", log=None):
+    """Set ociocolorspace parameter for the given OpenGL node.
+
+    Sets the `ociocolorspace` parameter of the given OpenGL node
+    to the given review_color_space value.
+    If review_color_space is empty, a default colorspace corresponding to
+    the display & view of the current Houdini session will be used.
+
+    Args:
+        opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
+        review_color_space (str): Colorspace value for ociocolorspace parm.
+        log (logging.Logger): Logger to log to.
+    """
+
+    if log is None:
+        # fall back to this module's logger
+        log = logging.getLogger(__name__)
+
+    # Set Color Correction parameter to OpenColorIO
+    colorcorrect_parm = opengl_node.parm("colorcorrect")
+    if colorcorrect_parm.eval() != 2:
+        colorcorrect_parm.set(2)
+        log.debug(
+            "'Color Correction' parm on '{}' has been set to"
+            " 'OpenColorIO'".format(opengl_node.path())
+        )
+
+    opengl_node.setParms(
+        {"ociocolorspace": review_color_space}
+    )
+
+    log.debug(
+        "'OCIO Colorspace' parm on '{}' has been set to "
+        "the view color space '{}'"
+        .format(opengl_node, review_color_space)
+    )
+
+
 def get_context_var_changes():
     """get context var changes."""
diff --git a/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
new file mode 100644
index 0000000000..7d41979600
--- /dev/null
+++ b/client/ayon_core/hosts/houdini/hooks/set_default_display_and_view.py
@@ -0,0 +1,64 @@
+from ayon_applications import PreLaunchHook, LaunchTypes
+
+
+class SetDefaultDisplayView(PreLaunchHook):
+    """Set default view and default display for Houdini via OpenColorIO.
+
+    Houdini's defaultDisplay and defaultView are set by
+    setting the 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
+    environment variables respectively.
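+
+    Both are colon-separated lists; the configured value is prepended so it
+    becomes the default entry while any existing entries stay active.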
+
+    More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
+    """
+
+    app_groups = {"houdini"}
+    launch_types = {LaunchTypes.local}
+
+    def execute(self):
+
+        OCIO = self.launch_context.env.get("OCIO")
+
+        # This is a cheap way to skip this hook if either global color
+        # management or Houdini color management is disabled: if it were
+        # enabled, the OCIO var would have been set by the global OCIOEnvHook.
+        if not OCIO:
+            return
+
+        # workfile settings added in '0.2.13'
+        houdini_color_settings = \
+            self.data["project_settings"]["houdini"]["imageio"].get("workfile")
+
+        if not houdini_color_settings:
+            self.log.info("Hook 'SetDefaultDisplayView' requires Houdini "
                          "addon version >= '0.2.13'")
+            return
+
+        if not houdini_color_settings["enabled"]:
+            self.log.info(
+                "Houdini workfile color management is disabled."
+            )
+            return
+
+        # 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS' are checked
+        # as admins can add them in AYON env vars or AYON tools.
+
+        default_display = houdini_color_settings["default_display"]
+        if default_display:
+            # get the 'OCIO_ACTIVE_DISPLAYS' value if it exists.
+            self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
+
+        default_view = houdini_color_settings["default_view"]
+        if default_view:
+            # get the 'OCIO_ACTIVE_VIEWS' value if it exists.
+            self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
+
+    def _set_context_env(self, env_var, default_value):
+        env_value = self.launch_context.env.get(env_var, "")
+        new_value = ":".join(
+            key for key in [default_value, env_value] if key
+        )
+        self.log.info(
+            "Setting {} environment to: {}"
+            .format(env_var, new_value)
+        )
+        self.launch_context.env[env_var] = new_value
diff --git a/client/ayon_core/hosts/houdini/plugins/create/create_review.py b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
index 18f7ce498d..f5e4d4ce64 100644
--- a/client/ayon_core/hosts/houdini/plugins/create/create_review.py
+++ b/client/ayon_core/hosts/houdini/plugins/create/create_review.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating openGL reviews."""
-from ayon_core.hosts.houdini.api import plugin
+from ayon_core.hosts.houdini.api import lib, plugin
 from ayon_core.lib import EnumDef, BoolDef, NumberDef
 
 import os
@@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator):
     label = "Review"
     product_type = "review"
     icon = "video-camera"
+    review_color_space = ""
+
+    def apply_settings(self, project_settings):
+        super(CreateReview, self).apply_settings(project_settings)
+        # workfile settings added in '0.2.13'
+        color_settings = project_settings["houdini"]["imageio"].get(
+            "workfile", {}
+        )
+        if color_settings.get("enabled"):
+            self.review_color_space = color_settings.get("review_color_space")
 
     def create(self, product_name, instance_data, pre_create_data):
 
@@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator):
 
         instance_node.setParms(parms)
 
-        # Set OCIO Colorspace to the default output colorspace
+        # Set OCIO Colorspace to the default colorspace
         # if there's OCIO
         if os.getenv("OCIO"):
-            self.set_colorcorrect_to_default_view_space(instance_node)
+            # Fall back to the default value if self.review_color_space
+            # is empty.
+            if not self.review_color_space:
+                # self.review_color_space is an empty string
+                # when the imageio/workfile setting is disabled or
+                # when the Review colorspace setting is empty.
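+                # Query the default display/view colorspace of the current
+                # OCIO config as the fallback.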
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa + self.review_color_space = get_default_display_view_colorspace() + + lib.set_review_color_space(instance_node, + self.review_color_space, + self.log) to_lock = ["id", "productType"] @@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator): minimum=0.0001, decimals=3) ] - - def set_colorcorrect_to_default_view_space(self, - instance_node): - """Set ociocolorspace to the default output space.""" - from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa - - # set Color Correction parameter to OpenColorIO - instance_node.setParms({"colorcorrect": 2}) - - # Get default view space for ociocolorspace parm. - default_view_space = get_default_display_view_colorspace() - instance_node.setParms( - {"ociocolorspace": default_view_space} - ) - - self.log.debug( - "'OCIO Colorspace' parm on '{}' has been set to " - "the default view color space '{}'" - .format(instance_node, default_view_space) - ) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py index 040ad68a1a..e931c7bf1b 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_cache_farm.py @@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib class CollectDataforCache(pyblish.api.InstancePlugin): """Collect data for caching to Deadline.""" - order = pyblish.api.CollectorOrder + 0.04 + # Run after Collect Frames + order = pyblish.api.CollectorOrder + 0.11 families = ["ass", "pointcache", "mantraifd", "redshiftproxy", "vdbcache"] diff --git a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py index a643ab0d38..b38ebc6e2f 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/collect_frames.py @@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin): label = "Collect Frames" families = ["vdbcache", "imagesequence", "ass", "mantraifd", "redshiftproxy", "review", - "bgeo"] + "pointcache"] def process(self, instance): diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py index daf30b26ed..7ae476d2b4 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_alembic.py @@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor): staging_dir = os.path.dirname(output) instance.data["stagingDir"] = staging_dir - file_name = os.path.basename(output) + if instance.data.get("frames"): + # list of files + files = instance.data["frames"] + else: + # single file + files = os.path.basename(output) # We run the render - self.log.info("Writing alembic '%s' to '%s'" % (file_name, + self.log.info("Writing alembic '%s' to '%s'" % (files, staging_dir)) render_rop(ropnode) @@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor): representation = { 'name': 'abc', 'ext': 'abc', - 'files': file_name, + 'files': files, "stagingDir": staging_dir, } instance.data["representations"].append(representation) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py index c6dfb4332d..0fab69ef4a 100644 --- 
a/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_composite.py @@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext import hou -class ExtractComposite(publish.Extractor): +class ExtractComposite(publish.Extractor, + publish.ColormanagedPyblishPluginMixin): order = pyblish.api.ExtractorOrder label = "Extract Composite (Image Sequence)" @@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor): "frameEnd": instance.data["frameEndHandle"], } - from pprint import pformat - - self.log.info(pformat(representation)) + if ext.lower() == "exr": + # Inject colorspace with 'scene_linear' as that's the + # default Houdini working colorspace and all extracted + # OpenEXR images should be in that colorspace. + # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats + self.set_representation_colorspace( + representation, instance.context, + colorspace="scene_linear" + ) instance.data["representations"].append(representation) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py index fabdfd9a9d..57bb8b881a 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/extract_opengl.py @@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop import hou -class ExtractOpenGL(publish.Extractor): +class ExtractOpenGL(publish.Extractor, + publish.ColormanagedPyblishPluginMixin): order = pyblish.api.ExtractorOrder - 0.01 label = "Extract OpenGL" @@ -46,6 +47,14 @@ class ExtractOpenGL(publish.Extractor): "camera_name": instance.data.get("review_camera") } + if ropnode.evalParm("colorcorrect") == 2: # OpenColorIO enabled + colorspace = ropnode.evalParm("ociocolorspace") + # inject colorspace data + self.set_representation_colorspace( + representation, instance.context, + colorspace=colorspace + ) + if "representations" not in instance.data: instance.data["representations"] = [] instance.data["representations"].append(representation) diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py index fdf03d5cba..91bd36018a 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_cop_output_node.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -import sys +import hou import pyblish.api -import six from ayon_core.pipeline import PublishValidationError @@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise PublishValidationError( - ("Output node(s) `{}` are incorrect. " - "See plug-in log for details.").format(invalid), - title=self.label + "Output node '{}' is incorrect. " + "See plug-in log for details.".format(invalid), + title=self.label, + description=( + "### Invalid COP output node\n\n" + "The output node path for the instance must be set to a " + "valid COP node path.\n\nSee the log for more details." 
+ ) ) @classmethod def get_invalid(cls, instance): + output_node = instance.data.get("output_node") - import hou - - try: - output_node = instance.data["output_node"] - except KeyError: - six.reraise( - PublishValidationError, - PublishValidationError( - "Can't determine COP output node.", - title=cls.__name__), - sys.exc_info()[2] - ) - - if output_node is None: + if not output_node: node = hou.node(instance.data.get("instance_node")) cls.log.error( "COP Output node in '%s' does not exist. " @@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): cls.log.error( "Output node %s is not a COP node. " "COP Path must point to a COP node, " - "instead found category type: %s" - % (output_node.path(), output_node.type().category().name()) + "instead found category type: %s", + output_node.path(), output_node.type().category().name() ) return [output_node.path()] @@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin): # is Cop2 to avoid potential edge case scenarios even though # the isinstance check above should be stricter than this category if output_node.type().category().name() != "Cop2": - raise PublishValidationError( - ( - "Output node {} is not of category Cop2." - " This is a bug..." - ).format(output_node.path()), - title=cls.label) + cls.log.error( + "Output node %s is not of category Cop2.", output_node.path() + ) + return [output_node.path()] diff --git a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py index 031138e21d..e7f528ba57 100644 --- a/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py +++ b/client/ayon_core/hosts/houdini/plugins/publish/validate_review_colorspace.py @@ -4,15 +4,19 @@ from ayon_core.pipeline import ( PublishValidationError, OptionalPyblishPluginMixin ) -from ayon_core.pipeline.publish import RepairAction +from ayon_core.pipeline.publish import ( + RepairAction, + get_plugin_settings, + apply_plugin_settings_automatically +) from ayon_core.hosts.houdini.api.action import SelectROPAction import os import hou -class SetDefaultViewSpaceAction(RepairAction): - label = "Set default view colorspace" +class ResetViewSpaceAction(RepairAction): + label = "Reset OCIO colorspace parm" icon = "mdi.monitor" @@ -27,9 +31,28 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin, families = ["review"] hosts = ["houdini"] label = "Validate Review Colorspace" - actions = [SetDefaultViewSpaceAction, SelectROPAction] + actions = [ResetViewSpaceAction, SelectROPAction] optional = True + review_color_space = "" + + @classmethod + def apply_settings(cls, project_settings): + # Preserve automatic settings applying logic + settings = get_plugin_settings(plugin=cls, + project_settings=project_settings, + log=cls.log, + category="houdini") + apply_plugin_settings_automatically(cls, settings, logger=cls.log) + + # workfile settings added in '0.2.13' + color_settings = project_settings["houdini"]["imageio"].get( + "workfile", {} + ) + # Add review color settings + if color_settings.get("enabled"): + cls.review_color_space = color_settings.get("review_color_space") + def process(self, instance): @@ -52,39 +75,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin, " 'OpenColorIO'".format(rop_node.path()) ) - if rop_node.evalParm("ociocolorspace") not in \ - hou.Color.ocio_spaces(): - + current_color_space = rop_node.evalParm("ociocolorspace") + if current_color_space not in hou.Color.ocio_spaces(): raise 
PublishValidationError(
                "Invalid value: Colorspace name doesn't exist.\n"
                "Check 'OCIO Colorspace' parameter on '{}' ROP"
                .format(rop_node.path())
            )
 
-    @classmethod
-    def repair(cls, instance):
-        """Set Default View Space Action.
+        # If houdini/imageio/workfile is enabled and the Review colorspace
+        # setting is empty, then this check should actually verify that
+        # current_color_space equals the default colorspace value.
+        # However, that would make the black cmd screen show up more often,
+        # which is very annoying.
+        if self.review_color_space and \
+                self.review_color_space != current_color_space:
 
-        It is a helper action more than a repair action,
-        used to set colorspace on opengl node to the default view.
-        """
-        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
-
-        rop_node = hou.node(instance.data["instance_node"])
-
-        if rop_node.evalParm("colorcorrect") != 2:
-            rop_node.setParms({"colorcorrect": 2})
-            cls.log.debug(
-                "'Color Correction' parm on '{}' has been set to"
-                " 'OpenColorIO'".format(rop_node.path())
+            raise PublishValidationError(
+                "Invalid value: Colorspace name doesn't match "
+                "the colorspace specified in settings."
             )
 
-        # Get default view colorspace name
-        default_view_space = get_default_display_view_colorspace()
+    @classmethod
+    def repair(cls, instance):
+        """Reset view colorspace.
 
-        rop_node.setParms({"ociocolorspace": default_view_space})
-        cls.log.info(
-            "'OCIO Colorspace' parm on '{}' has been set to "
-            "the default view color space '{}'"
-            .format(rop_node, default_view_space)
-        )
+        It is used to set the colorspace on the OpenGL node.
+
+        It uses the colorspace value specified in the Houdini addon settings.
+        If the value in the Houdini addon settings is empty,
+        it falls back to the default colorspace.
+
+        Note:
+            This repair action assumes that OCIO is enabled;
+            if OCIO is disabled, the whole validation is skipped
+            and this repair action won't show up.
+        """
+        from ayon_core.hosts.houdini.api.lib import set_review_color_space
+
+        # Fall back to the default value if cls.review_color_space is empty.
+        if not cls.review_color_space:
+            # cls.review_color_space is an empty string
+            # when the imageio/workfile setting is disabled or
+            # when the Review colorspace setting is empty.
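+            # Ask the active OCIO config for its default display/view
+            # colorspace to use as the fallback.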
+ from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace # noqa + cls.review_color_space = get_default_display_view_colorspace() + + rop_node = hou.node(instance.data["instance_node"]) + set_review_color_space(rop_node, + cls.review_color_space, + cls.log) diff --git a/client/ayon_core/hosts/max/api/lib.py b/client/ayon_core/hosts/max/api/lib.py index 02b099b3ff..0e3abe25ec 100644 --- a/client/ayon_core/hosts/max/api/lib.py +++ b/client/ayon_core/hosts/max/api/lib.py @@ -6,12 +6,9 @@ import json from typing import Any, Dict, Union import six -import ayon_api from ayon_core.pipeline import ( get_current_project_name, - get_current_folder_path, - get_current_task_name, colorspace ) from ayon_core.settings import get_project_settings @@ -496,9 +493,9 @@ def object_transform_set(container_children): """ transform_set = {} for node in container_children: - name = f"{node.name}.transform" + name = f"{node}.transform" transform_set[name] = node.pos - name = f"{node.name}.scale" + name = f"{node}.scale" transform_set[name] = node.scale return transform_set @@ -519,6 +516,36 @@ def get_plugins() -> list: return plugin_info_list +def update_modifier_node_names(event, node): + """Update the name of the nodes after renaming + + Args: + event (pymxs.MXSWrapperBase): Event Name ( + Mandatory argument for rt.NodeEventCallback) + node (list): Event Number ( + Mandatory argument for rt.NodeEventCallback) + + """ + containers = [ + obj + for obj in rt.Objects + if ( + rt.ClassOf(obj) == rt.Container + and rt.getUserProp(obj, "id") == "pyblish.avalon.instance" + and rt.getUserProp(obj, "productType") not in { + "workfile", "tyflow" + } + ) + ] + if not containers: + return + for container in containers: + ayon_data = container.modifiers[0].openPypeData + updated_node_names = [str(node.node) for node + in ayon_data.all_handles] + rt.setProperty(ayon_data, "sel_list", updated_node_names) + + @contextlib.contextmanager def render_resolution(width, height): """Set render resolution option during context diff --git a/client/ayon_core/hosts/max/api/pipeline.py b/client/ayon_core/hosts/max/api/pipeline.py index 675f36c24f..dc13f47795 100644 --- a/client/ayon_core/hosts/max/api/pipeline.py +++ b/client/ayon_core/hosts/max/api/pipeline.py @@ -63,6 +63,8 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): rt.callbacks.addScript(rt.Name('postWorkspaceChange'), self._deferred_menu_creation) + rt.NodeEventCallback( + nameChanged=lib.update_modifier_node_names) def workfile_has_unsaved_changes(self): return rt.getSaveRequired() diff --git a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py index 4f982dd5ba..97b8c6cd52 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_max_scene.py +++ b/client/ayon_core/hosts/max/plugins/load/load_max_scene.py @@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin): ) for max_obj, obj_name in zip(max_objects, max_object_names): max_obj.name = f"{namespace}:{obj_name}" - max_container.append(rt.getNodeByName(max_obj.name)) + max_container.append(max_obj) return containerise( name, max_container, context, namespace, loader=self.__class__.__name__) @@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin): current_max_object_names): max_obj.name = f"{namespace}:{obj_name}" max_objects.append(max_obj) - max_transform = f"{max_obj.name}.transform" + max_transform = f"{max_obj}.transform" if max_transform in transform_data.keys(): max_obj.pos = 
transform_data[max_transform] or 0 max_obj.scale = transform_data[ - f"{max_obj.name}.scale"] or 0 + f"{max_obj}.scale"] or 0 update_custom_attribute_data(node, max_objects) lib.imprint(container["instance_node"], { diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py index 82cad71c3e..6f5de20ae0 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_fbx.py @@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin): for fbx_object in current_fbx_objects: fbx_object.name = f"{namespace}:{fbx_object.name}" fbx_objects.append(fbx_object) - fbx_transform = f"{fbx_object.name}.transform" + fbx_transform = f"{fbx_object}.transform" if fbx_transform in transform_data.keys(): fbx_object.pos = transform_data[fbx_transform] or 0 fbx_object.scale = transform_data[ - f"{fbx_object.name}.scale"] or 0 + f"{fbx_object}.scale"] or 0 with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py index 38f2cdf43c..a9119259df 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_obj.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_obj.py @@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin): selections = rt.GetCurrentSelection() for selection in selections: selection.name = f"{namespace}:{selection.name}" - selection_transform = f"{selection.name}.transform" + selection_transform = f"{selection}.transform" if selection_transform in transform_data.keys(): selection.pos = transform_data[selection_transform] or 0 selection.scale = transform_data[ - f"{selection.name}.scale"] or 0 + f"{selection}.scale"] or 0 update_custom_attribute_data(node, selections) with maintained_selection(): rt.Select(node) diff --git a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py index 2b946eb2aa..2ed5d64a18 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_model_usd.py +++ b/client/ayon_core/hosts/max/plugins/load/load_model_usd.py @@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin): for children in asset.Children: children.name = f"{namespace}:{children.name}" usd_objects.append(children) - children_transform = f"{children.name}.transform" + children_transform = f"{children}.transform" if children_transform in transform_data.keys(): children.pos = transform_data[children_transform] or 0 children.scale = transform_data[ - f"{children.name}.scale"] or 0 + f"{children}.scale"] or 0 asset.name = f"{namespace}:{asset.name}" usd_objects.append(asset) diff --git a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py index 2efb7c7f62..47690f84e9 100644 --- a/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py +++ b/client/ayon_core/hosts/max/plugins/load/load_pointcache_ornatrix.py @@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin): abc.Parent = container abc.name = f"{namespace}:{abc.name}" ox_abc_objects.append(abc) - ox_transform = f"{abc.name}.transform" + ox_transform = f"{abc}.transform" if ox_transform in transform_data.keys(): abc.pos = transform_data[ox_transform] or 0 - abc.scale = transform_data[f"{abc.name}.scale"] or 0 + abc.scale = transform_data[f"{abc}.scale"] or 0 update_custom_attribute_data(node, ox_abc_objects) lib.imprint( 
container["instance_node"], diff --git a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py index 67b5174200..67cec23ecc 100644 --- a/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py +++ b/client/ayon_core/hosts/max/plugins/publish/extract_alembic.py @@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor, hosts = ["max"] families = ["pointcache"] optional = True + active = True def process(self, instance): if not self.is_active(instance.data): @@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor, @classmethod def get_attribute_defs(cls): - return [ + defs = super(ExtractAlembic, cls).get_attribute_defs() + defs.extend([ BoolDef("custom_attrs", label="Custom Attributes", default=False), - ] + ]) + return defs class ExtractCameraAlembic(ExtractAlembic): """Extract Camera with AlembicExport.""" - label = "Extract Alembic Camera" families = ["camera"] + optional = True -class ExtractModel(ExtractAlembic): +class ExtractModelAlembic(ExtractAlembic): """Extract Geometry in Alembic Format""" label = "Extract Geometry (Alembic)" families = ["model"] + optional = True def _set_abc_attributes(self, instance): attr_values = self.get_attr_values_from_data(instance.data) diff --git a/client/ayon_core/hosts/max/startup/startup.ms b/client/ayon_core/hosts/max/startup/startup.ms index 2dfe53a6a5..c5b4f0e526 100644 --- a/client/ayon_core/hosts/max/startup/startup.ms +++ b/client/ayon_core/hosts/max/startup/startup.ms @@ -12,4 +12,4 @@ max create mode python.ExecuteFile startup -) \ No newline at end of file +) diff --git a/client/ayon_core/hosts/maya/api/alembic.py b/client/ayon_core/hosts/maya/api/alembic.py new file mode 100644 index 0000000000..6bd00e1cb1 --- /dev/null +++ b/client/ayon_core/hosts/maya/api/alembic.py @@ -0,0 +1,350 @@ +import json +import logging +import os + +from maya import cmds # noqa + +from ayon_core.hosts.maya.api.lib import evaluation + +log = logging.getLogger(__name__) + +# The maya alembic export types +ALEMBIC_ARGS = { + "attr": (list, tuple), + "attrPrefix": (list, tuple), + "autoSubd": bool, + "dataFormat": str, + "endFrame": float, + "eulerFilter": bool, + "frameRange": str, # "start end"; overrides startFrame & endFrame + "frameRelativeSample": float, + "melPerFrameCallback": str, + "melPostJobCallback": str, + "noNormals": bool, + "preRoll": bool, + "pythonPerFrameCallback": str, + "pythonPostJobCallback": str, + "renderableOnly": bool, + "root": (list, tuple), + "selection": bool, + "startFrame": float, + "step": float, + "stripNamespaces": bool, + "userAttr": (list, tuple), + "userAttrPrefix": (list, tuple), + "uvWrite": bool, + "uvsOnly": bool, + "verbose": bool, + "wholeFrameGeo": bool, + "worldSpace": bool, + "writeColorSets": bool, + "writeCreases": bool, # Maya 2015 Ext1+ + "writeFaceSets": bool, + "writeUVSets": bool, # Maya 2017+ + "writeVisibility": bool, +} + + +def extract_alembic( + file, + attr=None, + attrPrefix=None, + dataFormat="ogawa", + endFrame=None, + eulerFilter=True, + frameRange="", + melPerFrameCallback=None, + melPostJobCallback=None, + noNormals=False, + preRoll=False, + preRollStartFrame=0, + pythonPerFrameCallback=None, + pythonPostJobCallback=None, + renderableOnly=False, + root=None, + selection=True, + startFrame=None, + step=1.0, + stripNamespaces=True, + userAttr=None, + userAttrPrefix=None, + uvsOnly=False, + uvWrite=True, + verbose=False, + wholeFrameGeo=False, + worldSpace=False, + writeColorSets=False, + writeCreases=False, + 
writeFaceSets=False,
+    writeUVSets=False,
+    writeVisibility=False
+):
+    """Extract a single Alembic Cache.
+
+    This extracts an Alembic cache using the `-selection` flag to minimize
+    the extracted content to solely what was Collected into the instance.
+
+    Arguments:
+        file (str): The filepath to write the alembic file to.
+
+        attr (list of str, optional): A specific geometric attribute to write
+            out. Defaults to [].
+
+        attrPrefix (list of str, optional): Prefix filter for determining
+            which geometric attributes to write out. Defaults to ["ABC_"].
+
+        dataFormat (str): The data format to use for the cache,
+            defaults to "ogawa"
+
+        endFrame (float): End frame of output. Ignored if `frameRange`
+            provided.
+
+        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered
+            with an Euler filter. Euler filtering helps resolve
+            irregularities in rotations especially if X, Y, and Z rotations
+            exceed 360 degrees. Defaults to True.
+
+        frameRange (tuple or str): Two-tuple with start and end frame or a
+            string formatted as: "startFrame endFrame". This argument
+            overrides `startFrame` and `endFrame` arguments.
+
+        melPerFrameCallback (Optional[str]): MEL callback run per frame.
+
+        melPostJobCallback (Optional[str]): MEL callback after last frame is
+            written.
+
+        noNormals (bool): When on, normal data from the original polygon
+            objects is not included in the exported Alembic cache file.
+
+        preRoll (bool): This frame range will not be sampled.
+            Defaults to False.
+
+        preRollStartFrame (float): The frame to start scene
+            evaluation at. This is used to set the starting frame for time
+            dependent translations and can be used to evaluate run-up that
+            isn't actually translated. Defaults to 0.
+
+        pythonPerFrameCallback (Optional[str]): Python callback run per
+            frame.
+
+        pythonPostJobCallback (Optional[str]): Python callback after last
+            frame is written.
+
+        renderableOnly (bool): When on, any non-renderable nodes or
+            hierarchy, such as hidden objects, are not included in the
+            Alembic file. Defaults to False.
+
+        root (list of str): Maya dag path which will be parented to
+            the root of the Alembic file. Defaults to [], which means the
+            entire scene will be written out.
+
+        selection (bool): Write out all selected nodes from the
+            active selection list that are descendants of the roots specified
+            with -root. Defaults to True.
+
+        startFrame (float): Start frame of output. Ignored if `frameRange`
+            provided.
+
+        step (float): The time interval (expressed in frames) at
+            which the frame range is sampled. Additional samples around each
+            frame can be specified with -frs. Defaults to 1.0.
+
+        stripNamespaces (bool): When on, any namespaces associated with the
+            exported objects are removed from the Alembic file. For example,
+            an object with the namespace taco:foo:bar appears as bar in the
+            Alembic file.
+
+        userAttr (list of str, optional): A specific user defined attribute
+            to write out. Defaults to [].
+
+        userAttrPrefix (list of str, optional): Prefix filter for determining
+            which user defined attributes to write out. Defaults to [].
+
+        uvsOnly (bool): When on, only UV data for PolyMesh and SubD shapes
+            will be written to the Alembic file.
+
+        uvWrite (bool): When on, UV data from polygon meshes and subdivision
+            objects are written to the Alembic file. Only the current UV map
+            is included.
+
+        verbose (bool): When on, outputs frame number information to the
+            Script Editor or output window during extraction.
+ + wholeFrameGeo (bool): Data for geometry will only be written + out on whole frames. Defaults to False. + + worldSpace (bool): When on, the top node in the node hierarchy is + stored as world space. By default, these nodes are stored as local + space. Defaults to False. + + writeColorSets (bool): Write all color sets on MFnMeshes as + color 3 or color 4 indexed geometry parameters with face varying + scope. Defaults to False. + + writeCreases (bool): If the mesh has crease edges or crease + vertices, the mesh (OPolyMesh) would now be written out as an OSubD + and crease info will be stored in the Alembic file. Otherwise, + creases info won't be preserved in Alembic file unless a custom + Boolean attribute SubDivisionMesh has been added to mesh node and + its value is true. Defaults to False. + + writeFaceSets (bool): Write all Face sets on MFnMeshes. + Defaults to False. + + writeUVSets (bool): Write all uv sets on MFnMeshes as vector + 2 indexed geometry parameters with face varying scope. Defaults to + False. + + writeVisibility (bool): Visibility state will be stored in + the Alembic file. Otherwise everything written out is treated as + visible. Defaults to False. + """ + + # Ensure alembic exporter is loaded + cmds.loadPlugin('AbcExport', quiet=True) + + # Alembic Exporter requires forward slashes + file = file.replace('\\', '/') + + # Ensure list arguments are valid. + attr = attr or [] + attrPrefix = attrPrefix or [] + userAttr = userAttr or [] + userAttrPrefix = userAttrPrefix or [] + root = root or [] + + # Pass the start and end frame on as `frameRange` so that it + # never conflicts with that argument + if not frameRange: + # Fallback to maya timeline if no start or end frame provided. + if startFrame is None: + startFrame = cmds.playbackOptions(query=True, + animationStartTime=True) + if endFrame is None: + endFrame = cmds.playbackOptions(query=True, + animationEndTime=True) + + # Ensure valid types are converted to frame range + assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"]) + assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"]) + frameRange = "{0} {1}".format(startFrame, endFrame) + else: + # Allow conversion from tuple for `frameRange` + if isinstance(frameRange, (list, tuple)): + assert len(frameRange) == 2 + frameRange = "{0} {1}".format(frameRange[0], frameRange[1]) + + # Assemble options + options = { + "selection": selection, + "frameRange": frameRange, + "eulerFilter": eulerFilter, + "noNormals": noNormals, + "preRoll": preRoll, + "root": root, + "renderableOnly": renderableOnly, + "uvWrite": uvWrite, + "uvsOnly": uvsOnly, + "writeColorSets": writeColorSets, + "writeFaceSets": writeFaceSets, + "wholeFrameGeo": wholeFrameGeo, + "worldSpace": worldSpace, + "writeVisibility": writeVisibility, + "writeUVSets": writeUVSets, + "writeCreases": writeCreases, + "dataFormat": dataFormat, + "step": step, + "attr": attr, + "attrPrefix": attrPrefix, + "userAttr": userAttr, + "userAttrPrefix": userAttrPrefix, + "stripNamespaces": stripNamespaces, + "verbose": verbose + } + + # Validate options + for key, value in options.copy().items(): + + # Discard unknown options + if key not in ALEMBIC_ARGS: + log.warning("extract_alembic() does not support option '%s'. 
" + "Flag will be ignored..", key) + options.pop(key) + continue + + # Validate value type + valid_types = ALEMBIC_ARGS[key] + if not isinstance(value, valid_types): + raise TypeError("Alembic option unsupported type: " + "{0} (expected {1})".format(value, valid_types)) + + # Ignore empty values, like an empty string, since they mess up how + # job arguments are built + if isinstance(value, (list, tuple)): + value = [x for x in value if x.strip()] + + # Ignore option completely if no values remaining + if not value: + options.pop(key) + continue + + options[key] = value + + # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ + maya_version = int(cmds.about(version=True)) + if maya_version >= 2018: + options['autoSubd'] = options.pop('writeCreases', False) + + # Only add callbacks if they are set so that we're not passing `None` + callbacks = { + "melPerFrameCallback": melPerFrameCallback, + "melPostJobCallback": melPostJobCallback, + "pythonPerFrameCallback": pythonPerFrameCallback, + "pythonPostJobCallback": pythonPostJobCallback, + } + for key, callback in callbacks.items(): + if callback: + options[key] = str(callback) + + # Format the job string from options + job_args = list() + for key, value in options.items(): + if isinstance(value, (list, tuple)): + for entry in value: + job_args.append("-{} {}".format(key, entry)) + elif isinstance(value, bool): + # Add only when state is set to True + if value: + job_args.append("-{0}".format(key)) + else: + job_args.append("-{0} {1}".format(key, value)) + + job_str = " ".join(job_args) + job_str += ' -file "%s"' % file + + # Ensure output directory exists + parent_dir = os.path.dirname(file) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + if verbose: + log.debug("Preparing Alembic export with options: %s", + json.dumps(options, indent=4)) + log.debug("Extracting Alembic with job arguments: %s", job_str) + + # Perform extraction + print("Alembic Job Arguments : {}".format(job_str)) + + # Disable the parallel evaluation temporarily to ensure no buggy + # exports are made. 
(PLN-31) + # TODO: Make sure this actually fixes the issues + with evaluation("off"): + cmds.AbcExport( + j=job_str, + verbose=verbose, + preRollStartFrame=preRollStartFrame + ) + + if verbose: + log.debug("Extracted Alembic to: %s", file) + + return file diff --git a/client/ayon_core/hosts/maya/api/lib.py b/client/ayon_core/hosts/maya/api/lib.py index 321bcbc0b5..2b41ffc06c 100644 --- a/client/ayon_core/hosts/maya/api/lib.py +++ b/client/ayon_core/hosts/maya/api/lib.py @@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] -# The maya alembic export types -_alembic_options = { - "startFrame": float, - "endFrame": float, - "frameRange": str, # "start end"; overrides startFrame & endFrame - "eulerFilter": bool, - "frameRelativeSample": float, - "noNormals": bool, - "renderableOnly": bool, - "step": float, - "stripNamespaces": bool, - "uvWrite": bool, - "wholeFrameGeo": bool, - "worldSpace": bool, - "writeVisibility": bool, - "writeColorSets": bool, - "writeFaceSets": bool, - "writeCreases": bool, # Maya 2015 Ext1+ - "writeUVSets": bool, # Maya 2017+ - "dataFormat": str, - "root": (list, tuple), - "attr": (list, tuple), - "attrPrefix": (list, tuple), - "userAttr": (list, tuple), - "melPerFrameCallback": str, - "melPostJobCallback": str, - "pythonPerFrameCallback": str, - "pythonPostJobCallback": str, - "selection": bool -} - INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000} FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94} @@ -1330,7 +1299,7 @@ def is_visible(node, override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node)) override_visibility = cmds.getAttr('{}.overrideVisibility'.format( node)) - if override_enabled and override_visibility: + if override_enabled and not override_visibility: return False if parentHidden: @@ -1346,178 +1315,6 @@ def is_visible(node, return True - -def extract_alembic(file, - startFrame=None, - endFrame=None, - selection=True, - uvWrite=True, - eulerFilter=True, - dataFormat="ogawa", - verbose=False, - **kwargs): - """Extract a single Alembic Cache. - - This extracts an Alembic cache using the `-selection` flag to minimize - the extracted content to solely what was Collected into the instance. - - Arguments: - - startFrame (float): Start frame of output. Ignored if `frameRange` - provided. - - endFrame (float): End frame of output. Ignored if `frameRange` - provided. - - frameRange (tuple or str): Two-tuple with start and end frame or a - string formatted as: "startFrame endFrame". This argument - overrides `startFrame` and `endFrame` arguments. - - dataFormat (str): The data format to use for the cache, - defaults to "ogawa" - - verbose (bool): When on, outputs frame number information to the - Script Editor or output window during extraction. - - noNormals (bool): When on, normal data from the original polygon - objects is not included in the exported Alembic cache file. - - renderableOnly (bool): When on, any non-renderable nodes or hierarchy, - such as hidden objects, are not included in the Alembic file. - Defaults to False. - - stripNamespaces (bool): When on, any namespaces associated with the - exported objects are removed from the Alembic file. For example, an - object with the namespace taco:foo:bar appears as bar in the - Alembic file. - - uvWrite (bool): When on, UV data from polygon meshes and subdivision - objects are written to the Alembic file. Only the current UV map is - included. - - worldSpace (bool): When on, the top node in the node hierarchy is - stored as world space. 
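For reference, a minimal usage sketch of the new `extract_alembic` added above; the import path matches the `ayon_core.hosts.maya.api.alembic` module referenced elsewhere in this diff, while the node name and output path are hypothetical:

    from maya import cmds
    from ayon_core.hosts.maya.api.alembic import extract_alembic

    # Hypothetical node and output path, for illustration only
    cmds.select("|pCube1", replace=True)
    extract_alembic(
        file="/tmp/caches/pCube1.abc",
        frameRange=(1001, 1025),  # overrides startFrame/endFrame
        selection=True,           # export only the selected hierarchy
        uvWrite=True,
        worldSpace=True,
        attr=["cbId"],            # explicit geometric attributes to include
        attrPrefix=["ABC_"],      # plus any attribute matching this prefix
        verbose=True,
    )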
By default, these nodes are stored as local - space. Defaults to False. - - eulerFilter (bool): When on, X, Y, and Z rotation data is filtered with - an Euler filter. Euler filtering helps resolve irregularities in - rotations especially if X, Y, and Z rotations exceed 360 degrees. - Defaults to True. - - """ - - # Ensure alembic exporter is loaded - cmds.loadPlugin('AbcExport', quiet=True) - - # Alembic Exporter requires forward slashes - file = file.replace('\\', '/') - - # Pass the start and end frame on as `frameRange` so that it - # never conflicts with that argument - if "frameRange" not in kwargs: - # Fallback to maya timeline if no start or end frame provided. - if startFrame is None: - startFrame = cmds.playbackOptions(query=True, - animationStartTime=True) - if endFrame is None: - endFrame = cmds.playbackOptions(query=True, - animationEndTime=True) - - # Ensure valid types are converted to frame range - assert isinstance(startFrame, _alembic_options["startFrame"]) - assert isinstance(endFrame, _alembic_options["endFrame"]) - kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame) - else: - # Allow conversion from tuple for `frameRange` - frame_range = kwargs["frameRange"] - if isinstance(frame_range, (list, tuple)): - assert len(frame_range) == 2 - kwargs["frameRange"] = "{0} {1}".format(frame_range[0], - frame_range[1]) - - # Assemble options - options = { - "selection": selection, - "uvWrite": uvWrite, - "eulerFilter": eulerFilter, - "dataFormat": dataFormat - } - options.update(kwargs) - - # Validate options - for key, value in options.copy().items(): - - # Discard unknown options - if key not in _alembic_options: - log.warning("extract_alembic() does not support option '%s'. " - "Flag will be ignored..", key) - options.pop(key) - continue - - # Validate value type - valid_types = _alembic_options[key] - if not isinstance(value, valid_types): - raise TypeError("Alembic option unsupported type: " - "{0} (expected {1})".format(value, valid_types)) - - # Ignore empty values, like an empty string, since they mess up how - # job arguments are built - if isinstance(value, (list, tuple)): - value = [x for x in value if x.strip()] - - # Ignore option completely if no values remaining - if not value: - options.pop(key) - continue - - options[key] = value - - # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ - maya_version = int(cmds.about(version=True)) - if maya_version >= 2018: - options['autoSubd'] = options.pop('writeCreases', False) - - # Format the job string from options - job_args = list() - for key, value in options.items(): - if isinstance(value, (list, tuple)): - for entry in value: - job_args.append("-{} {}".format(key, entry)) - elif isinstance(value, bool): - # Add only when state is set to True - if value: - job_args.append("-{0}".format(key)) - else: - job_args.append("-{0} {1}".format(key, value)) - - job_str = " ".join(job_args) - job_str += ' -file "%s"' % file - - # Ensure output directory exists - parent_dir = os.path.dirname(file) - if not os.path.exists(parent_dir): - os.makedirs(parent_dir) - - if verbose: - log.debug("Preparing Alembic export with options: %s", - json.dumps(options, indent=4)) - log.debug("Extracting Alembic with job arguments: %s", job_str) - - # Perform extraction - print("Alembic Job Arguments : {}".format(job_str)) - - # Disable the parallel evaluation temporarily to ensure no buggy - # exports are made. 
(PLN-31) - # TODO: Make sure this actually fixes the issues - with evaluation("off"): - cmds.AbcExport(j=job_str, verbose=verbose) - - if verbose: - log.debug("Extracted Alembic to: %s", file) - - return file - - # region ID def get_id_required_nodes(referenced_nodes=False, nodes=None, @@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True): """ fps_mapping = { + '2': '2fps', + '3': '3fps', + '4': '4fps', + '5': '5fps', + '6': '6fps', + '8': '8fps', + '10': '10fps', + '12': '12fps', '15': 'game', + '16': '16fps', '24': 'film', '25': 'pal', '30': 'ntsc', @@ -2612,21 +2418,24 @@ def get_fps_for_current_context(): Returns: Union[int, float]: FPS value. """ - - project_name = get_current_project_name() - folder_path = get_current_folder_path() - folder_entity = ayon_api.get_folder_by_path( - project_name, folder_path, fields={"attrib.fps"} - ) or {} - fps = folder_entity.get("attrib", {}).get("fps") + task_entity = get_current_task_entity(fields={"attrib"}) + fps = task_entity.get("attrib", {}).get("fps") if not fps: - project_entity = ayon_api.get_project( - project_name, fields=["attrib.fps"] + project_name = get_current_project_name() + folder_path = get_current_folder_path() + folder_entity = ayon_api.get_folder_by_path( + project_name, folder_path, fields={"attrib.fps"} ) or {} - fps = project_entity.get("attrib", {}).get("fps") + fps = folder_entity.get("attrib", {}).get("fps") if not fps: - fps = 25 + project_entity = ayon_api.get_project( + project_name, fields=["attrib.fps"] + ) or {} + fps = project_entity.get("attrib", {}).get("fps") + + if not fps: + fps = 25 return convert_to_maya_fps(fps) @@ -4403,3 +4212,23 @@ def create_rig_animation_instance( variant=namespace, pre_create_data={"use_selection": True} ) + + +def get_node_index_under_parent(node: str) -> int: + """Return the index of a DAG node under its parent. + + Arguments: + node (str): A DAG Node path. + + Returns: + int: The DAG node's index under its parents or world + + """ + node = cmds.ls(node, long=True)[0] # enforce long names + parent = node.rsplit("|", 1)[0] + if not parent: + return cmds.ls(assemblies=True, long=True).index(node) + else: + return cmds.listRelatives(parent, + children=True, + fullPath=True).index(node) diff --git a/client/ayon_core/hosts/maya/api/lib_renderproducts.py b/client/ayon_core/hosts/maya/api/lib_renderproducts.py index 832d1c21c2..52c282c6de 100644 --- a/client/ayon_core/hosts/maya/api/lib_renderproducts.py +++ b/client/ayon_core/hosts/maya/api/lib_renderproducts.py @@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts): # AOVs > Legacy > Maya Render View > Mode aovs_enabled = bool( - self._get_attr("defaultArnoldRenderOptions.aovMode") + self._get_attr( + "defaultArnoldRenderOptions.aovMode", as_string=False) ) if not aovs_enabled: return beauty_products diff --git a/client/ayon_core/hosts/maya/api/pipeline.py b/client/ayon_core/hosts/maya/api/pipeline.py index 864a0c1599..74d73e5f95 100644 --- a/client/ayon_core/hosts/maya/api/pipeline.py +++ b/client/ayon_core/hosts/maya/api/pipeline.py @@ -30,9 +30,11 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_inventory_action_path, register_creator_plugin_path, + register_workfile_build_plugin_path, deregister_loader_plugin_path, deregister_inventory_action_path, deregister_creator_plugin_path, + deregister_workfile_build_plugin_path, AYON_CONTAINER_ID, AVALON_CONTAINER_ID, ) @@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR from ayon_core.hosts.maya.lib import create_workspace_mel from . 
import menu, lib
-from .workfile_template_builder import MayaPlaceholderLoadPlugin
 from .workio import (
     open_file,
     save_file,
@@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
 LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
 CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
 INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
 
 AVALON_CONTAINERS = ":AVALON_CONTAINERS"
 
@@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         register_loader_plugin_path(LOAD_PATH)
         register_creator_plugin_path(CREATE_PATH)
         register_inventory_action_path(INVENTORY_PATH)
-        self.log.info(PUBLISH_PATH)
+        register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
 
         self.log.info("Installing callbacks ... ")
         register_event_callback("init", on_init)
@@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
     def get_containers(self):
         return ls()
 
-    def get_workfile_build_placeholder_plugins(self):
-        return [
-            MayaPlaceholderLoadPlugin
-        ]
-
     @contextlib.contextmanager
     def maintained_selection(self):
         with lib.maintained_selection():
@@ -338,6 +335,7 @@ def uninstall():
     deregister_loader_plugin_path(LOAD_PATH)
     deregister_creator_plugin_path(CREATE_PATH)
     deregister_inventory_action_path(INVENTORY_PATH)
+    deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
 
     menu.uninstall()
diff --git a/client/ayon_core/hosts/maya/api/workfile_template_builder.py b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
index 75386d7e64..f4f9a34983 100644
--- a/client/ayon_core/hosts/maya/api/workfile_template_builder.py
+++ b/client/ayon_core/hosts/maya/api/workfile_template_builder.py
@@ -12,14 +12,13 @@ from ayon_core.pipeline.workfile.workfile_template_builder import (
     TemplateAlreadyImported,
     AbstractTemplateBuilder,
     PlaceholderPlugin,
-    LoadPlaceholderItem,
-    PlaceholderLoadMixin,
+    PlaceholderItem,
 )
 from ayon_core.tools.workfile_template_build import (
     WorkfileBuildPlaceholderDialog,
 )
 
-from .lib import read, imprint, get_reference_node, get_main_window
+from .lib import read, imprint, get_main_window
 
 PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@@ -91,170 +90,102 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
 
         return True
 
 
-class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
-    identifier = "maya.load"
-    label = "Maya load"
+class MayaPlaceholderPlugin(PlaceholderPlugin):
+    """Base Placeholder Plugin for Maya with one unified cache.
 
-    def _collect_scene_placeholders(self):
-        # Cache placeholder data to shared data
-        placeholder_nodes = self.builder.get_shared_populate_data(
-            "placeholder_nodes"
-        )
-        if placeholder_nodes is None:
-            attributes = cmds.ls("*.plugin_identifier", long=True)
-            placeholder_nodes = {}
-            for attribute in attributes:
-                node_name = attribute.rpartition(".")[0]
-                placeholder_nodes[node_name] = (
-                    self._parse_placeholder_node_data(node_name)
-                )
+    Creates a locator as the placeholder node, which during populate
+    provides all of the attributes defined on the locator's transform in
+    `placeholder.data`, with `placeholder.scene_identifier` set to the
+    node's full path.
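To make the contract above concrete, a hypothetical subclass could look like the following sketch; the identifier, label and populate behavior are illustrative only, with `populate_placeholder` being the part subclasses must supply:

    class MyPlaceholderPlugin(MayaPlaceholderPlugin):
        identifier = "my.studio.placeholder"  # hypothetical identifier
        label = "My placeholder"

        def populate_placeholder(self, placeholder):
            # `placeholder.data` holds the attributes imprinted on the
            # locator's transform; `placeholder.scene_identifier` is the
            # node's full path.
            self.log.info("Populating %s", placeholder.scene_identifier)
            self.delete_placeholder(placeholder)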
- self.builder.set_shared_populate_data( - "placeholder_nodes", placeholder_nodes - ) - return placeholder_nodes + Inherited classes must still implement `populate_placeholder` - def _parse_placeholder_node_data(self, node_name): - placeholder_data = read(node_name) - parent_name = ( - cmds.getAttr(node_name + ".parent", asString=True) - or node_name.rpartition("|")[0] - or "" - ) - if parent_name: - siblings = cmds.listRelatives(parent_name, children=True) - else: - siblings = cmds.ls(assemblies=True) - node_shortname = node_name.rpartition("|")[2] - current_index = cmds.getAttr(node_name + ".index", asString=True) - if current_index < 0: - current_index = siblings.index(node_shortname) + """ - placeholder_data.update({ - "parent": parent_name, - "index": current_index - }) - return placeholder_data + use_selection_as_parent = True + item_class = PlaceholderItem def _create_placeholder_name(self, placeholder_data): - placeholder_name_parts = placeholder_data["builder_type"].split("_") + return self.identifier.replace(".", "_") - pos = 1 - placeholder_product_type = placeholder_data.get("product_type") - if placeholder_product_type is None: - placeholder_product_type = placeholder_data.get("family") - - if placeholder_product_type: - placeholder_name_parts.insert(pos, placeholder_product_type) - pos += 1 - - # add loader arguments if any - loader_args = placeholder_data["loader_args"] - if loader_args: - loader_args = json.loads(loader_args.replace('\'', '\"')) - values = [v for v in loader_args.values()] - for value in values: - placeholder_name_parts.insert(pos, value) - pos += 1 - - placeholder_name = "_".join(placeholder_name_parts) - - return placeholder_name.capitalize() - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" + def _collect_scene_placeholders(self): + nodes_by_identifier = self.builder.get_shared_populate_data( + "placeholder_nodes" ) - if loaded_representation_ids is None: - try: - containers = cmds.sets("AVALON_CONTAINERS", q=True) - except ValueError: - containers = [] + if nodes_by_identifier is None: + # Cache placeholder data to shared data + nodes = cmds.ls("*.plugin_identifier", long=True, objectsOnly=True) - loaded_representation_ids = { - cmds.getAttr(container + ".representation") - for container in containers - } + nodes_by_identifier = {} + for node in nodes: + identifier = cmds.getAttr("{}.plugin_identifier".format(node)) + nodes_by_identifier.setdefault(identifier, []).append(node) + + # Set the cache self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids + "placeholder_nodes", nodes_by_identifier ) - return loaded_representation_ids + + return nodes_by_identifier def create_placeholder(self, placeholder_data): - selection = cmds.ls(selection=True) - if len(selection) > 1: - raise ValueError("More then one item are selected") - parent = selection[0] if selection else None + parent = None + if self.use_selection_as_parent: + selection = cmds.ls(selection=True) + if len(selection) > 1: + raise ValueError( + "More than one node is selected. " + "Please select only one to define the parent." 
+                )
+            parent = selection[0] if selection else None
 
         placeholder_data["plugin_identifier"] = self.identifier
-
         placeholder_name = self._create_placeholder_name(placeholder_data)
 
         placeholder = cmds.spaceLocator(name=placeholder_name)[0]
         if parent:
             placeholder = cmds.parent(placeholder, selection[0])[0]
 
-        imprint(placeholder, placeholder_data)
-
-        # Add helper attributes to keep placeholder info
-        cmds.addAttr(
-            placeholder,
-            longName="parent",
-            hidden=True,
-            dataType="string"
-        )
-        cmds.addAttr(
-            placeholder,
-            longName="index",
-            hidden=True,
-            attributeType="short",
-            defaultValue=-1
-        )
-
-        cmds.setAttr(placeholder + ".parent", "", type="string")
+        self.imprint(placeholder, placeholder_data)
 
     def update_placeholder(self, placeholder_item, placeholder_data):
         node_name = placeholder_item.scene_identifier
-        new_values = {}
+
+        changed_values = {}
         for key, value in placeholder_data.items():
-            placeholder_value = placeholder_item.data.get(key)
-            if value != placeholder_value:
-                new_values[key] = value
-                placeholder_item.data[key] = value
+            if value != placeholder_item.data.get(key):
+                changed_values[key] = value
 
-        for key in new_values.keys():
-            cmds.deleteAttr(node_name + "." + key)
+        # Delete attributes to ensure we imprint new data with correct type
+        for key in changed_values.keys():
+            placeholder_item.data[key] = changed_values[key]
+            if cmds.attributeQuery(key, node=node_name, exists=True):
+                attribute = "{}.{}".format(node_name, key)
+                cmds.deleteAttr(attribute)
 
-        imprint(node_name, new_values)
+        self.imprint(node_name, changed_values)
 
     def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, placeholder_data in scene_placeholders.items():
-            if placeholder_data.get("plugin_identifier") != self.identifier:
-                continue
-
+        placeholders = []
+        nodes_by_identifier = self._collect_scene_placeholders()
+        for node in nodes_by_identifier.get(self.identifier, []):
             # TODO do data validations and maybe upgrades if they are invalid
-            output.append(
-                LoadPlaceholderItem(node_name, placeholder_data, self)
+            placeholder_data = self.read(node)
+            placeholders.append(
+                self.item_class(scene_identifier=node,
+                                data=placeholder_data,
+                                plugin=self)
             )
-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_load_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        repre_ids = self._get_loaded_repre_ids()
-        self.populate_load_placeholder(placeholder, repre_ids)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
+        return placeholders
 
     def post_placeholder_process(self, placeholder, failed):
         """Cleanup placeholder after load of its corresponding representations.
 
+        Hide the placeholder and add it to the placeholder set.
+
+        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
+
         Args:
             placeholder (PlaceholderItem): Item which was just used to load
                 representation.
@@ -263,81 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
 
         # Hide placeholder and add them to placeholder set
         node = placeholder.scene_identifier
+
+        # If we just populate the placeholders from the current scene, the
+        # placeholder set will not exist yet, so account for that.
+ if not cmds.objExists(PLACEHOLDER_SET): + cmds.sets(name=PLACEHOLDER_SET, empty=True) + cmds.sets(node, addElement=PLACEHOLDER_SET) cmds.hide(node) - cmds.setAttr(node + ".hiddenInOutliner", True) + cmds.setAttr("{}.hiddenInOutliner".format(node), True) def delete_placeholder(self, placeholder): - """Remove placeholder if building was successful""" - cmds.delete(placeholder.scene_identifier) + """Remove placeholder if building was successful - def load_succeed(self, placeholder, container): - self._parent_in_hierarchy(placeholder, container) - - def _parent_in_hierarchy(self, placeholder, container): - """Parent loaded container to placeholder's parent. - - ie : Set loaded content as placeholder's sibling - - Args: - container (str): Placeholder loaded containers + Used only by PlaceholderCreateMixin and PlaceholderLoadMixin. """ + node = placeholder.scene_identifier - if not container: - return + # To avoid that deleting a placeholder node will have Maya delete + # any objectSets the node was a member of we will first remove it + # from any sets it was a member of. This way the `PLACEHOLDERS_SET` + # will survive long enough + sets = cmds.listSets(o=node) or [] + for object_set in sets: + cmds.sets(node, remove=object_set) - roots = cmds.sets(container, q=True) or [] - ref_node = None - try: - ref_node = get_reference_node(roots) - except AssertionError as e: - self.log.info(e.args[0]) + cmds.delete(node) - nodes_to_parent = [] - for root in roots: - if ref_node: - ref_root = cmds.referenceQuery(root, nodes=True)[0] - ref_root = ( - cmds.listRelatives(ref_root, parent=True, path=True) or - [ref_root] - ) - nodes_to_parent.extend(ref_root) - continue - if root.endswith("_RN"): - # Backwards compatibility for hardcoded reference names. - refRoot = cmds.referenceQuery(root, n=True)[0] - refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot] - nodes_to_parent.extend(refRoot) - elif root not in cmds.listSets(allSets=True): - nodes_to_parent.append(root) + def imprint(self, node, data): + """Imprint call for placeholder node""" - elif not cmds.sets(root, q=True): - return + # Complicated data that can't be represented as flat maya attributes + # we write to json strings, e.g. multiselection EnumDef + for key, value in data.items(): + if isinstance(value, (list, tuple, dict)): + data[key] = "JSON::{}".format(json.dumps(value)) - # Move loaded nodes to correct index in outliner hierarchy - placeholder_form = cmds.xform( - placeholder.scene_identifier, - q=True, - matrix=True, - worldSpace=True - ) - scene_parent = cmds.listRelatives( - placeholder.scene_identifier, parent=True, fullPath=True - ) - for node in set(nodes_to_parent): - cmds.reorder(node, front=True) - cmds.reorder(node, relative=placeholder.data["index"]) - cmds.xform(node, matrix=placeholder_form, ws=True) - if scene_parent: - cmds.parent(node, scene_parent) - else: - cmds.parent(node, world=True) + imprint(node, data) - holding_sets = cmds.listSets(object=placeholder.scene_identifier) - if not holding_sets: - return - for holding_set in holding_sets: - cmds.sets(roots, forceElement=holding_set) + def read(self, node): + """Read call for placeholder node""" + + data = read(node) + + # Complicated data that can't be represented as flat maya attributes + # we read from json strings, e.g. 
multiselection EnumDef
+        for key, value in data.items():
+            if isinstance(value, str) and value.startswith("JSON::"):
+                value = value[len("JSON::"):]  # strip off the JSON:: prefix
+                data[key] = json.loads(value)
+
+        return data
 
 
 def build_workfile_template(*args):
diff --git a/client/ayon_core/hosts/maya/api/yeti.py b/client/ayon_core/hosts/maya/api/yeti.py
new file mode 100644
index 0000000000..1526c3a2f3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/api/yeti.py
@@ -0,0 +1,101 @@
+from typing import List
+
+from maya import cmds
+
+
+def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
+    """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+    Arguments:
+        yeti_shape_node (str): The `pgYetiMaya` shape node.
+
+    Returns:
+        list: Attribute names (for a vector attribute it only lists the top
+            parent attribute, not the attribute per axis)
+    """
+
+    attrs = cmds.listAttr(yeti_shape_node,
+                          userDefined=True,
+                          string=("yetiVariableV_*",
+                                  "yetiVariableF_*")) or []
+    valid_attrs = []
+    for attr in attrs:
+        attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
+                                        attributeType=True)
+        if attr.startswith("yetiVariableV_") and attr_type == "double3":
+            # vector
+            valid_attrs.append(attr)
+        elif attr.startswith("yetiVariableF_") and attr_type == "double":
+            valid_attrs.append(attr)
+
+    return valid_attrs
+
+
+def create_yeti_variable(yeti_shape_node: str,
+                         attr_name: str,
+                         value=None,
+                         force_value: bool = False) -> bool:
+    """Create a user defined yeti variable on a `pgYetiMaya` shape node.
+
+    Arguments:
+        yeti_shape_node (str): The `pgYetiMaya` shape node.
+        attr_name (str): The fully qualified yeti variable name, e.g.
+            "yetiVariableF_myfloat" or "yetiVariableV_myvector"
+        value (object): The value to set (must match the type of the
+            attribute). When value is None it will be ignored and not set.
+        force_value (bool): Whether to set the value even if the attribute
+            already exists.
+
+    Returns:
+        bool: Whether the attribute value was set or not.
+ + """ + exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True) + if not exists: + if attr_name.startswith("yetiVariableV_"): + _create_vector_yeti_user_variable(yeti_shape_node, attr_name) + if attr_name.startswith("yetiVariableF_"): + _create_float_yeti_user_variable(yeti_shape_node, attr_name) + + if value is not None and (not exists or force_value): + plug = "{}.{}".format(yeti_shape_node, attr_name) + if ( + isinstance(value, (list, tuple)) + and attr_name.startswith("yetiVariableV_") + ): + cmds.setAttr(plug, *value, type="double3") + else: + cmds.setAttr(plug, value) + + return True + return False + + +def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str): + if not attr_name.startswith("yetiVariableV_"): + raise ValueError("Must start with yetiVariableV_") + cmds.addAttr(yeti_shape_node, + longName=attr_name, + attributeType="double3", + cachedInternally=True, + keyable=True) + for axis in "XYZ": + cmds.addAttr(yeti_shape_node, + longName="{}{}".format(attr_name, axis), + attributeType="double", + parent=attr_name, + cachedInternally=True, + keyable=True) + + +def _create_float_yeti_user_variable(yeti_node: str, attr_name: str): + if not attr_name.startswith("yetiVariableF_"): + raise ValueError("Must start with yetiVariableF_") + + cmds.addAttr(yeti_node, + longName=attr_name, + attributeType="double", + cachedInternally=True, + softMinValue=0, + softMaxValue=100, + keyable=True) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation.py b/client/ayon_core/hosts/maya/plugins/create/create_animation.py deleted file mode 100644 index f30d9aba81..0000000000 --- a/client/ayon_core/hosts/maya/plugins/create/create_animation.py +++ /dev/null @@ -1,89 +0,0 @@ -from ayon_core.hosts.maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreateAnimation(plugin.MayaHiddenCreator): - """Animation output for character rigs - - We hide the animation creator from the UI since the creation of it is - automated upon loading a rig. There's an inventory action to recreate it - for loaded rigs if by chance someone deleted the animation instance. 
- """ - identifier = "io.openpype.creators.maya.animation" - name = "animationDefault" - label = "Animation" - product_type = "animation" - icon = "male" - - write_color_sets = False - write_face_sets = False - include_parent_hierarchy = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=self.write_color_sets), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=self.write_face_sets), - BoolDef("writeNormals", - label="Write normals", - tooltip="Write normals with the deforming geometry", - default=True), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=self.include_parent_hierarchy), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. - self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def apply_settings(self, project_settings): - super(CreateAnimation, self).apply_settings(project_settings) - # Hardcoding creator to be enabled due to existing settings would - # disable the creator causing the creator plugin to not be - # discoverable. - self.enabled = True diff --git a/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py new file mode 100644 index 0000000000..069762e4ae --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/create/create_animation_pointcache.py @@ -0,0 +1,138 @@ +from maya import cmds + +from ayon_core.hosts.maya.api import lib, plugin + +from ayon_core.lib import ( + BoolDef, + NumberDef, +) + + +def _get_animation_attr_defs(cls): + """Get Animation generic definitions.""" + defs = lib.collect_animation_defs() + defs.extend( + [ + BoolDef("farm", label="Submit to Farm"), + NumberDef("priority", label="Farm job Priority", default=50), + BoolDef("refresh", label="Refresh viewport during export"), + BoolDef( + "includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip=( + "Whether to include parent hierarchy of nodes in the " + "publish instance." + ) + ), + BoolDef( + "includeUserDefinedAttributes", + label="Include User Defined Attributes", + tooltip=( + "Whether to include all custom maya attributes found " + "on nodes as attributes in the Alembic data." + ) + ), + ] + ) + + return defs + + +def convert_legacy_alembic_creator_attributes(node_data, class_name): + """This is a legacy transfer of creator attributes to publish attributes + for ExtractAlembic/ExtractAnimation plugin. 
+ """ + publish_attributes = node_data["publish_attributes"] + + if class_name in publish_attributes: + return node_data + + attributes = [ + "attr", + "attrPrefix", + "visibleOnly", + "writeColorSets", + "writeFaceSets", + "writeNormals", + "renderableOnly", + "visibleOnly", + "worldSpace", + "renderableOnly" + ] + plugin_attributes = {} + for attr in attributes: + if attr not in node_data["creator_attributes"]: + continue + value = node_data["creator_attributes"].pop(attr) + + plugin_attributes[attr] = value + + publish_attributes[class_name] = plugin_attributes + + return node_data + + +class CreateAnimation(plugin.MayaHiddenCreator): + """Animation output for character rigs + + We hide the animation creator from the UI since the creation of it is + automated upon loading a rig. There's an inventory action to recreate it + for loaded rigs if by chance someone deleted the animation instance. + """ + + identifier = "io.openpype.creators.maya.animation" + name = "animationDefault" + label = "Animation" + product_type = "animation" + icon = "male" + + write_color_sets = False + write_face_sets = False + include_parent_hierarchy = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreateAnimation, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAnimation" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreateAnimation, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + +class CreatePointCache(plugin.MayaCreator): + """Alembic pointcache for animated data""" + + identifier = "io.openpype.creators.maya.pointcache" + label = "Pointcache" + product_type = "pointcache" + icon = "gears" + write_color_sets = False + write_face_sets = False + include_user_defined_attributes = False + + def read_instance_node(self, node): + node_data = super(CreatePointCache, self).read_instance_node(node) + node_data = convert_legacy_alembic_creator_attributes( + node_data, "ExtractAlembic" + ) + return node_data + + def get_instance_attr_defs(self): + defs = super(CreatePointCache, self).get_instance_attr_defs() + defs += _get_animation_attr_defs(self) + return defs + + def create(self, product_name, instance_data, pre_create_data): + instance = super(CreatePointCache, self).create( + product_name, instance_data, pre_create_data + ) + instance_node = instance.get("instance_node") + + # For Arnold standin proxy + proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) + cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py index dc0ffb02c1..e321c13ca0 100644 --- a/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -1,3 +1,5 @@ +from maya import cmds + from ayon_core.hosts.maya.api import ( lib, plugin @@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator): return defs + +class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource): + """Arnold Scene Source Proxy + + This product type facilitates working with proxy geometry in the viewport. 
+ """ + + identifier = "io.openpype.creators.maya.assproxy" + label = "Arnold Scene Source Proxy" + product_type = "assProxy" + icon = "cube" + def create(self, product_name, instance_data, pre_create_data): - - from maya import cmds - instance = super(CreateArnoldSceneSource, self).create( product_name, instance_data, pre_create_data ) instance_node = instance.get("instance_node") - content = cmds.sets(name=instance_node + "_content_SET", empty=True) proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets([content, proxy], forceElement=instance_node) + cmds.sets([proxy], forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py b/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py deleted file mode 100644 index 05e3a1a29f..0000000000 --- a/client/ayon_core/hosts/maya/plugins/create/create_pointcache.py +++ /dev/null @@ -1,88 +0,0 @@ -from maya import cmds - -from ayon_core.hosts.maya.api import ( - lib, - plugin -) -from ayon_core.lib import ( - BoolDef, - TextDef -) - - -class CreatePointCache(plugin.MayaCreator): - """Alembic pointcache for animated data""" - - identifier = "io.openpype.creators.maya.pointcache" - label = "Pointcache" - product_type = "pointcache" - icon = "gears" - write_color_sets = False - write_face_sets = False - include_user_defined_attributes = False - - def get_instance_attr_defs(self): - - defs = lib.collect_animation_defs() - - defs.extend([ - BoolDef("writeColorSets", - label="Write vertex colors", - tooltip="Write vertex colors with the geometry", - default=False), - BoolDef("writeFaceSets", - label="Write face sets", - tooltip="Write face sets with the geometry", - default=False), - BoolDef("renderableOnly", - label="Renderable Only", - tooltip="Only export renderable visible shapes", - default=False), - BoolDef("visibleOnly", - label="Visible Only", - tooltip="Only export dag objects visible during " - "frame range", - default=False), - BoolDef("includeParentHierarchy", - label="Include Parent Hierarchy", - tooltip="Whether to include parent hierarchy of nodes in " - "the publish instance", - default=False), - BoolDef("worldSpace", - label="World-Space Export", - default=True), - BoolDef("refresh", - label="Refresh viewport during export", - default=False), - BoolDef("includeUserDefinedAttributes", - label="Include User Defined Attributes", - default=self.include_user_defined_attributes), - TextDef("attr", - label="Custom Attributes", - default="", - placeholder="attr1, attr2"), - TextDef("attrPrefix", - label="Custom Attributes Prefix", - default="", - placeholder="prefix1, prefix2") - ]) - - # TODO: Implement these on a Deadline plug-in instead? - """ - # Default to not send to farm. 
- self.data["farm"] = False - self.data["priority"] = 50 - """ - - return defs - - def create(self, product_name, instance_data, pre_create_data): - - instance = super(CreatePointCache, self).create( - product_name, instance_data, pre_create_data - ) - instance_node = instance.get("instance_node") - - # For Arnold standin proxy - proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True) - cmds.sets(proxy_set, forceElement=instance_node) diff --git a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py index 4b7d2f42ab..ae3b68965a 100644 --- a/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py +++ b/client/ayon_core/hosts/maya/plugins/load/load_arnold_standin.py @@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import ( unique_namespace, get_attribute_input, maintained_selection, + get_fps_for_current_context ) from ayon_core.hosts.maya.api.pipeline import containerise from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type @@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin): """Load as Arnold standin""" product_types = { - "ass", "animation", "model", "proxyAbc", "pointcache", "usd" + "ass", + "assProxy", + "animation", + "model", + "proxyAbc", + "pointcache", + "usd" } representations = {"ass", "abc", "usda", "usdc", "usd"} @@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin): sequence = is_sequence(os.listdir(os.path.dirname(repre_path))) cmds.setAttr(standin_shape + ".useFrameExtension", sequence) - fps = float(version_attributes.get("fps")) or 25 - cmds.setAttr(standin_shape + ".abcFPS", fps) + fps = ( + version_attributes.get("fps") or get_fps_for_current_context() + ) + cmds.setAttr(standin_shape + ".abcFPS", float(fps)) nodes = [root, standin, standin_shape] if operator is not None: @@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin): proxy_path = "/".join([os.path.dirname(path), proxy_basename]) return proxy_basename, proxy_path + def _update_operators(self, string_replace_operator, proxy_basename, path): + cmds.setAttr( + string_replace_operator + ".match", + proxy_basename.split(".")[0], + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path).split(".")[0], + type="string" + ) + def _setup_proxy(self, shape, path, namespace): proxy_basename, proxy_path = self._get_proxy_path(path) @@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin): "*.(@node=='{}')".format(node_type), type="string" ) - cmds.setAttr( - string_replace_operator + ".match", - proxy_basename, - type="string" - ) - cmds.setAttr( - string_replace_operator + ".replace", - os.path.basename(path), - type="string" - ) + self._update_operators(string_replace_operator, proxy_basename, path) cmds.connectAttr( string_replace_operator + ".out", @@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin): path = get_representation_path(repre_entity) proxy_basename, proxy_path = self._get_proxy_path(path) - # Whether there is proxy or so, we still update the string operator. + # Whether there is proxy or not, we still update the string operator. # If no proxy exists, the string operator won't replace anything. 
-        cmds.setAttr(
-            string_replace_operator + ".match",
-            proxy_basename,
-            type="string"
-        )
-        cmds.setAttr(
-            string_replace_operator + ".replace",
-            os.path.basename(path),
-            type="string"
-        )
+        self._update_operators(string_replace_operator, proxy_basename, path)
 
         dso_path = path
         if os.path.exists(proxy_path):
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_as_template.py b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
new file mode 100644
index 0000000000..f696d369e3
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/load/load_as_template.py
@@ -0,0 +1,39 @@
+from ayon_core.lib import (
+    BoolDef
+)
+from ayon_core.pipeline import (
+    load,
+    registered_host
+)
+from ayon_core.hosts.maya.api.workfile_template_builder import (
+    MayaTemplateBuilder
+)
+
+
+class LoadAsTemplate(load.LoaderPlugin):
+    """Load workfile as a template."""
+
+    product_types = {"workfile", "mayaScene"}
+    label = "Load as template"
+    representations = ["ma", "mb"]
+    icon = "wrench"
+    color = "#775555"
+    order = 10
+
+    options = [
+        BoolDef("keep_placeholders",
+                label="Keep Placeholders",
+                default=False),
+        BoolDef("create_first_version",
+                label="Create First Version",
+                default=False),
+    ]
+
+    def load(self, context, name, namespace, data):
+        keep_placeholders = data.get("keep_placeholders", False)
+        create_first_version = data.get("create_first_version", False)
+        path = self.filepath_from_context(context)
+        builder = MayaTemplateBuilder(registered_host())
+        builder.build_template(template_path=path,
+                               keep_placeholders=keep_placeholders,
+                               create_first_version=create_first_version)
diff --git a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
index caea6b7a72..4ca9ae9d03 100644
--- a/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
+++ b/client/ayon_core/hosts/maya/plugins/load/load_yeti_cache.py
@@ -12,6 +12,7 @@ from ayon_core.pipeline import (
     get_representation_path
 )
 from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import create_yeti_variable
 from ayon_core.hosts.maya.api.pipeline import containerise
 from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
 
@@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
     "viewportDensity",
     "viewportWidth",
     "viewportLength",
+    "renderDensity",
+    "renderWidth",
+    "renderLength",
+    "increaseRenderBounds"
 }
 
+SKIP_ATTR_MESSAGE = (
+    "Skipping updating %s.%s to %s because it "
+    "is considered a local overridable attribute. "
+    "Either set manually or load the cache "
+    "anew."
+) + def set_attribute(node, attr, value): """Wrapper of set attribute which ignores None values""" @@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin): for attr, value in node_settings["attrs"].items(): if attr in SKIP_UPDATE_ATTRS: + self.log.info( + SKIP_ATTR_MESSAGE, yeti_node, attr, value + ) continue set_attribute(attr, value, yeti_node) + # Set up user defined attributes + user_variables = node_settings.get("user_variables", {}) + for attr, value in user_variables.items(): + was_value_set = create_yeti_variable( + yeti_shape_node=yeti_node, + attr_name=attr, + value=value, + # We do not want to update the + # value if it already exists so + # that any local overrides that + # may have been applied still + # persist + force_value=False + ) + if not was_value_set: + self.log.info( + SKIP_ATTR_MESSAGE, yeti_node, attr, value + ) + cmds.setAttr("{}.representation".format(container_node), repre_entity["id"], typ="string") @@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin): for attr, value in attributes.items(): set_attribute(attr, value, yeti_node) + # Set up user defined attributes + user_variables = node_settings.get("user_variables", {}) + for attr, value in user_variables.items(): + create_yeti_variable(yeti_shape_node=yeti_node, + attr_name=attr, + value=value) + # Connect to the time node cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py index 2ab6511ece..391c80c84e 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_animation.py @@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): if instance.data.get("farm"): instance.data["families"].append("publish.farm") - diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 0db89bee31..fb71e128eb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): # Offset to be after renderable camera collection. 
order = pyblish.api.CollectorOrder + 0.2 label = "Collect Arnold Scene Source" - families = ["ass"] + families = ["ass", "assProxy"] def process(self, instance): - objsets = instance.data["setMembers"] + instance.data["members"] = [] + for set_member in instance.data["setMembers"]: + if cmds.nodeType(set_member) != "objectSet": + instance.data["members"].extend(self.get_hierarchy(set_member)) + continue - for objset in objsets: - objset = str(objset) - members = cmds.sets(objset, query=True) + members = cmds.sets(set_member, query=True) members = cmds.ls(members, long=True) if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) + self.log.warning( + "Skipped empty instance: \"%s\" " % set_member + ) continue - if objset.endswith("content_SET"): - instance.data["contentMembers"] = self.get_hierarchy(members) - if objset.endswith("proxy_SET"): + if set_member.endswith("proxy_SET"): instance.data["proxy"] = self.get_hierarchy(members) # Use camera in object set if present else default to render globals @@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] if renderable: camera = renderable[0] - for node in instance.data["contentMembers"]: + for node in instance.data["members"]: camera_shapes = cmds.listRelatives( node, shapes=True, type="camera" ) @@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): self.log.debug("data: {}".format(instance.data)) def get_hierarchy(self, nodes): - """Return nodes with all their children. - - Arguments: - nodes (List[str]): List of nodes to collect children hierarchy for - - Returns: - list: Input nodes with their children hierarchy - - """ + """Return nodes with all their children""" nodes = cmds.ls(nodes, long=True) if not nodes: return [] - - children = get_all_children(nodes, ignore_intermediate_objects=True) - return list(children.union(nodes)) + children = get_all_children(nodes) + # Make sure nodes merged with children only + # contains unique entries + return list(set(nodes + list(children))) diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py index 16fef2e168..3d586d48fb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_user_defined_attributes.py @@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin): def process(self, instance): # Collect user defined attributes. 
- if not instance.data.get("includeUserDefinedAttributes", False): + if not instance.data["creator_attributes"].get( + "includeUserDefinedAttributes" + ): return if "out_hierarchy" in instance.data: diff --git a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py index 067a7bc532..e1755e4212 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/collect_yeti_cache.py @@ -3,6 +3,7 @@ from maya import cmds import pyblish.api from ayon_core.hosts.maya.api import lib +from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables SETTINGS = { @@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin): - "increaseRenderBounds" - "imageSearchPath" - Other information is the name of the transform and it's Colorbleed ID + Other information is the name of the transform and its `cbId` """ order = pyblish.api.CollectorOrder + 0.45 @@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin): # Get specific node attributes attr_data = {} for attr in SETTINGS: + # Ignore non-existing attributes with a warning, e.g. cbId + # if they have not been generated yet + if not cmds.attributeQuery(attr, node=shape, exists=True): + self.log.warning( + "Attribute '{}' not found on Yeti node: {}".format( + attr, shape + ) + ) + continue + current = cmds.getAttr("%s.%s" % (shape, attr)) # change None to empty string as Maya doesn't support # NoneType in attributes @@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin): current = "" attr_data[attr] = current + # Get user variable attributes + user_variable_attrs = { + attr: lib.get_attribute("{}.{}".format(shape, attr)) + for attr in get_yeti_user_variables(shape) + } + # Get transform data parent = cmds.listRelatives(shape, parent=True)[0] transform_data = {"name": parent, "cbId": lib.get_id(parent)} @@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin): "name": shape, "cbId": lib.get_id(shape), "attrs": attr_data, + "user_variables": user_variable_attrs } settings["nodes"].append(shape_data) diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py index ed8f2ad40c..fb4c41f1de 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor): families = ["ass"] asciiAss = False - def process(self, instance): - staging_dir = self.staging_dir(instance) + def _pre_process(self, instance, staging_dir): file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) # Mask @@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor): "mask": mask } - filenames, nodes_by_id = self._extract( - instance.data["contentMembers"], attribute_data, kwargs - ) - if "representations" not in instance.data: instance.data["representations"] = [] + return attribute_data, kwargs + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames = self._extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + def _post_process(self, instance, filenames, staging_dir, frame_start): + nodes_by_id = 
self._nodes_by_id(instance[:]) representation = { "name": "ass", "ext": "ass", "files": filenames if len(filenames) > 1 else filenames[0], "stagingDir": staging_dir, - "frameStart": kwargs["startFrame"] + "frameStart": frame_start } instance.data["representations"].append(representation) - json_path = os.path.join(staging_dir, "{}.json".format(instance.name)) + json_path = os.path.join( + staging_dir, "{}.json".format(instance.name) + ) with open(json_path, "w") as f: json.dump(nodes_by_id, f) @@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor): "Extracted instance {} to: {}".format(instance.name, staging_dir) ) - # Extract proxy. - if not instance.data.get("proxy", []): - return + def _nodes_by_id(self, nodes): + nodes_by_id = defaultdict(list) - kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + for node in nodes: + id = lib.get_id(node) - filenames, _ = self._extract( + if id is None: + continue + + # Converting Maya hierarchy separator "|" to Arnold separator "/". + nodes_by_id[id].append(node.replace("|", "/")) + + return nodes_by_id + + def _extract(self, nodes, attribute_data, kwargs): + filenames = [] + with lib.attribute_values(attribute_data): + with lib.maintained_selection(): + self.log.debug( + "Writing: {}".format(nodes) + ) + cmds.select(nodes, noExpand=True) + + self.log.debug( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.debug("Exported: {}".format(filenames)) + + return filenames + + +class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source Proxy" + hosts = ["maya"] + families = ["assProxy"] + asciiAss = True + + def process(self, instance): + staging_dir = self.staging_dir(instance) + attribute_data, kwargs = self._pre_process(instance, staging_dir) + + filenames, _ = self._duplicate_extract( + instance.data["members"], attribute_data, kwargs + ) + + self._post_process( + instance, filenames, staging_dir, kwargs["startFrame"] + ) + + kwargs["filename"] = os.path.join( + staging_dir, "{}_proxy.ass".format(instance.name) + ) + + filenames, _ = self._duplicate_extract( instance.data["proxy"], attribute_data, kwargs ) @@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor): instance.data["representations"].append(representation) - def _extract(self, nodes, attribute_data, kwargs): + def _duplicate_extract(self, nodes, attribute_data, kwargs): self.log.debug( "Writing {} with:\n{}".format(kwargs["filename"], kwargs) ) filenames = [] - nodes_by_id = defaultdict(list) # Duplicating nodes so they are direct children of the world. This # makes the hierarchy of any exported ass file the same. with lib.delete_after() as delete_bin: @@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor): if not shapes: continue - duplicate_transform = cmds.duplicate(node)[0] + basename = cmds.duplicate(node)[0] + parents = cmds.ls(node, long=True)[0].split("|")[:-1] + duplicate_transform = "|".join(parents + [basename]) if cmds.listRelatives(duplicate_transform, parent=True): duplicate_transform = cmds.parent( @@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor): duplicate_nodes.extend(shapes) delete_bin.append(duplicate_transform) - # Copy cbId to mtoa_constant. 
- for node in duplicate_nodes: - # Converting Maya hierarchy separator "|" to Arnold - # separator "/". - nodes_by_id[lib.get_id(node)].append(node.replace("|", "/")) - - with lib.attribute_values(attribute_data): - with lib.maintained_selection(): - self.log.debug( - "Writing: {}".format(duplicate_nodes) - ) - cmds.select(duplicate_nodes, noExpand=True) - - self.log.debug( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.debug("Exported: {}".format(filenames)) + nodes_by_id = self._nodes_by_id(duplicate_nodes) + filenames = self._extract(duplicate_nodes, attribute_data, kwargs) return filenames, nodes_by_id diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py index 2c23f9b752..5f51dc38cb 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_assembly.py @@ -2,7 +2,7 @@ import os import json from ayon_core.pipeline import publish -from ayon_core.hosts.maya.api.lib import extract_alembic +from ayon_core.hosts.maya.api.alembic import extract_alembic from maya import cmds diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py index 5de72f7674..cc930e49cc 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py +++ b/client/ayon_core/hosts/maya/plugins/publish/extract_pointcache.py @@ -1,17 +1,29 @@ import os +from collections import OrderedDict from maya import cmds from ayon_core.pipeline import publish +from ayon_core.hosts.maya.api.alembic import extract_alembic from ayon_core.hosts.maya.api.lib import ( - extract_alembic, + get_all_children, suspended_refresh, maintained_selection, iter_visible_nodes_in_range ) +from ayon_core.lib import ( + BoolDef, + TextDef, + NumberDef, + EnumDef, + UISeparatorDef, + UILabelDef, +) +from ayon_core.pipeline.publish import AYONPyblishPluginMixin +from ayon_core.pipeline import KnownPublishError -class ExtractAlembic(publish.Extractor): +class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin): """Produce an alembic of just point positions and normals. 
Positions and normals, uvs, creases are preserved, but nothing more, @@ -27,8 +39,35 @@ class ExtractAlembic(publish.Extractor): targets = ["local", "remote"] # From settings + attr = [] + attrPrefix = [] bake_attributes = [] bake_attribute_prefixes = [] + dataFormat = "ogawa" + eulerFilter = False + melPerFrameCallback = "" + melPostJobCallback = "" + overrides = [] + preRoll = False + preRollStartFrame = 0 + pythonPerFrameCallback = "" + pythonPostJobCallback = "" + renderableOnly = False + stripNamespaces = True + uvsOnly = False + uvWrite = False + userAttr = "" + userAttrPrefix = "" + verbose = False + visibleOnly = False + wholeFrameGeo = False + worldSpace = True + writeColorSets = False + writeCreases = False + writeFaceSets = False + writeNormals = True + writeUVSets = False + writeVisibility = False def process(self, instance): if instance.data.get("farm"): @@ -41,16 +80,38 @@ class ExtractAlembic(publish.Extractor): start = float(instance.data.get("frameStartHandle", 1)) end = float(instance.data.get("frameEndHandle", 1)) - attrs = instance.data.get("attr", "").split(";") - attrs = [value for value in attrs if value.strip()] + attribute_values = self.get_attr_values_from_data( + instance.data + ) + + attrs = [ + attr.strip() + for attr in attribute_values.get("attr", "").split(";") + if attr.strip() + ] attrs += instance.data.get("userDefinedAttributes", []) attrs += self.bake_attributes attrs += ["cbId"] - attr_prefixes = instance.data.get("attrPrefix", "").split(";") - attr_prefixes = [value for value in attr_prefixes if value.strip()] + attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("attrPrefix", "").split(";") + if attr.strip() + ] attr_prefixes += self.bake_attribute_prefixes + user_attrs = [ + attr.strip() + for attr in attribute_values.get("userAttr", "").split(";") + if attr.strip() + ] + + user_attr_prefixes = [ + attr.strip() + for attr in attribute_values.get("userAttrPrefix", "").split(";") + if attr.strip() + ] + self.log.debug("Extracting pointcache..") dirname = self.staging_dir(instance) @@ -58,28 +119,82 @@ class ExtractAlembic(publish.Extractor): filename = "{name}.abc".format(**instance.data) path = os.path.join(parent_dir, filename) - options = { - "step": instance.data.get("step", 1.0), - "attr": attrs, - "attrPrefix": attr_prefixes, - "writeVisibility": True, - "writeCreases": True, - "writeColorSets": instance.data.get("writeColorSets", False), - "writeFaceSets": instance.data.get("writeFaceSets", False), - "uvWrite": True, - "selection": True, - "worldSpace": instance.data.get("worldSpace", True) - } - + root = None if not instance.data.get("includeParentHierarchy", True): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = roots + root = roots - if int(cmds.about(version=True)) >= 2017: - # Since Maya 2017 alembic supports multiple uv sets - write them. 
- options["writeUVSets"] = True + kwargs = { + "file": path, + "attr": attrs, + "attrPrefix": attr_prefixes, + "userAttr": user_attrs, + "userAttrPrefix": user_attr_prefixes, + "dataFormat": attribute_values.get("dataFormat", self.dataFormat), + "endFrame": end, + "eulerFilter": attribute_values.get( + "eulerFilter", self.eulerFilter + ), + "preRoll": attribute_values.get("preRoll", self.preRoll), + "preRollStartFrame": attribute_values.get( + "preRollStartFrame", self.preRollStartFrame + ), + "renderableOnly": attribute_values.get( + "renderableOnly", self.renderableOnly + ), + "root": root, + "selection": True, + "startFrame": start, + "step": instance.data.get( + "creator_attributes", {} + ).get("step", 1.0), + "stripNamespaces": attribute_values.get( + "stripNamespaces", self.stripNamespaces + ), + "uvWrite": attribute_values.get("uvWrite", self.uvWrite), + "verbose": attribute_values.get("verbose", self.verbose), + "wholeFrameGeo": attribute_values.get( + "wholeFrameGeo", self.wholeFrameGeo + ), + "worldSpace": attribute_values.get("worldSpace", self.worldSpace), + "writeColorSets": attribute_values.get( + "writeColorSets", self.writeColorSets + ), + "writeCreases": attribute_values.get( + "writeCreases", self.writeCreases + ), + "writeFaceSets": attribute_values.get( + "writeFaceSets", self.writeFaceSets + ), + "writeUVSets": attribute_values.get( + "writeUVSets", self.writeUVSets + ), + "writeVisibility": attribute_values.get( + "writeVisibility", self.writeVisibility + ), + "uvsOnly": attribute_values.get( + "uvsOnly", self.uvsOnly + ), + "melPerFrameCallback": attribute_values.get( + "melPerFrameCallback", self.melPerFrameCallback + ), + "melPostJobCallback": attribute_values.get( + "melPostJobCallback", self.melPostJobCallback + ), + "pythonPerFrameCallback": attribute_values.get( + "pythonPerFrameCallback", self.pythonPostJobCallback + ), + "pythonPostJobCallback": attribute_values.get( + "pythonPostJobCallback", self.pythonPostJobCallback + ), + # Note that this converts `writeNormals` to `noNormals` for the + # `AbcExport` equivalent in `extract_alembic` + "noNormals": not attribute_values.get( + "writeNormals", self.writeNormals + ), + } if instance.data.get("visibleOnly", False): # If we only want to include nodes that are visible in the frame @@ -87,20 +202,19 @@ class ExtractAlembic(publish.Extractor): # flag does not filter out those that are only hidden on some # frames as it counts "animated" or "connected" visibilities as # if it's always visible. 
- nodes = list(iter_visible_nodes_in_range(nodes, - start=start, - end=end)) + nodes = list( + iter_visible_nodes_in_range(nodes, start=start, end=end) + ) suspend = not instance.data.get("refresh", False) with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(nodes, noExpand=True) - extract_alembic( - file=path, - startFrame=start, - endFrame=end, - **options + self.log.debug( + "Running `extract_alembic` with the keyword arguments: " + "{}".format(kwargs) ) + extract_alembic(**kwargs) if "representations" not in instance.data: instance.data["representations"] = [] @@ -124,22 +238,17 @@ class ExtractAlembic(publish.Extractor): return path = path.replace(".abc", "_proxy.abc") + kwargs["file"] = path if not instance.data.get("includeParentHierarchy", True): # Set the root nodes if we don't want to include parents # The roots are to be considered the ones that are the actual # direct members of the set - options["root"] = instance.data["proxyRoots"] + kwargs["root"] = instance.data["proxyRoots"] with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(instance.data["proxy"]) - extract_alembic( - file=path, - startFrame=start, - endFrame=end, - **options - ) - + extract_alembic(**kwargs) representation = { "name": "proxy", "ext": "abc", @@ -152,24 +261,265 @@ class ExtractAlembic(publish.Extractor): def get_members_and_roots(self, instance): return instance[:], instance.data.get("setMembers") + @classmethod + def get_attribute_defs(cls): + if not cls.overrides: + return [] + + override_defs = OrderedDict({ + "eulerFilter": BoolDef( + "eulerFilter", + label="Euler Filter", + default=cls.eulerFilter, + tooltip="Apply Euler filter while sampling rotations." + ), + "renderableOnly": BoolDef( + "renderableOnly", + label="Renderable Only", + default=cls.renderableOnly, + tooltip="Only export renderable visible shapes." + ), + "stripNamespaces": BoolDef( + "stripNamespaces", + label="Strip Namespaces", + default=cls.stripNamespaces, + tooltip=( + "Namespaces will be stripped off of the node before being " + "written to Alembic." + ) + ), + "uvsOnly": BoolDef( + "uvsOnly", + label="UVs Only", + default=cls.uvsOnly, + tooltip=( + "If this flag is present, only uv data for PolyMesh and " + "SubD shapes will be written to the Alembic file." + ) + ), + "uvWrite": BoolDef( + "uvWrite", + label="UV Write", + default=cls.uvWrite, + tooltip=( + "Uv data for PolyMesh and SubD shapes will be written to " + "the Alembic file." + ) + ), + "verbose": BoolDef( + "verbose", + label="Verbose", + default=cls.verbose, + tooltip="Prints the current frame that is being evaluated." + ), + "visibleOnly": BoolDef( + "visibleOnly", + label="Visible Only", + default=cls.visibleOnly, + tooltip="Only export dag objects visible during frame range." + ), + "wholeFrameGeo": BoolDef( + "wholeFrameGeo", + label="Whole Frame Geo", + default=cls.wholeFrameGeo, + tooltip=( + "Data for geometry will only be written out on whole " + "frames." + ) + ), + "worldSpace": BoolDef( + "worldSpace", + label="World Space", + default=cls.worldSpace, + tooltip="Any root nodes will be stored in world space." + ), + "writeColorSets": BoolDef( + "writeColorSets", + label="Write Color Sets", + default=cls.writeColorSets, + tooltip="Write vertex colors with the geometry." + ), + "writeCreases": BoolDef( + "writeCreases", + label="Write Creases", + default=cls.writeCreases, + tooltip="Write the geometry's edge and vertex crease " + "information." 
+ ), + "writeFaceSets": BoolDef( + "writeFaceSets", + label="Write Face Sets", + default=cls.writeFaceSets, + tooltip="Write face sets with the geometry." + ), + "writeNormals": BoolDef( + "writeNormals", + label="Write Normals", + default=cls.writeNormals, + tooltip="Write normals with the deforming geometry." + ), + "writeUVSets": BoolDef( + "writeUVSets", + label="Write UV Sets", + default=cls.writeUVSets, + tooltip=( + "Write all uv sets on MFnMeshes as vector 2 indexed " + "geometry parameters with face varying scope." + ) + ), + "writeVisibility": BoolDef( + "writeVisibility", + label="Write Visibility", + default=cls.writeVisibility, + tooltip=( + "Visibility state will be stored in the Alembic file. " + "Otherwise everything written out is treated as visible." + ) + ), + "preRoll": BoolDef( + "preRoll", + label="Pre Roll", + default=cls.preRoll, + tooltip="This frame range will not be sampled." + ), + "preRollStartFrame": NumberDef( + "preRollStartFrame", + label="Pre Roll Start Frame", + tooltip=( + "The frame to start scene evaluation at. This is used" + " to set the starting frame for time dependent " + "translations and can be used to evaluate run-up that" + " isn't actually translated." + ), + default=cls.preRollStartFrame + ), + "dataFormat": EnumDef( + "dataFormat", + label="Data Format", + items=["ogawa", "HDF"], + default=cls.dataFormat, + tooltip="The data format to use to write the file." + ), + "attr": TextDef( + "attr", + label="Custom Attributes", + placeholder="attr1; attr2; ...", + default=cls.attr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "attrPrefix": TextDef( + "attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.attrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttr": TextDef( + "userAttr", + label="User Attr", + placeholder="attr1; attr2; ...", + default=cls.userAttr, + tooltip=( + "Attributes matching by name will be included in the " + "Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "userAttrPrefix": TextDef( + "userAttrPrefix", + label="User Attr Prefix", + placeholder="prefix1; prefix2; ...", + default=cls.userAttrPrefix, + tooltip=( + "Attributes starting with these prefixes will be included " + "in the Alembic export. Attributes should be separated by " + "semi-colon `;`" + ) + ), + "melPerFrameCallback": TextDef( + "melPerFrameCallback", + label="Mel Per Frame Callback", + default=cls.melPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a Mel command." + ) + ), + "melPostJobCallback": TextDef( + "melPostJobCallback", + label="Mel Post Job Callback", + default=cls.melPostJobCallback, + tooltip=( + "When the translation has finished the string specified " + "is evaluated as a Mel command." + ) + ), + "pythonPerFrameCallback": TextDef( + "pythonPerFrameCallback", + label="Python Per Frame Callback", + default=cls.pythonPerFrameCallback, + tooltip=( + "When each frame (and the static frame) is evaluated the " + "string specified is evaluated as a python command." 
+                )
+            ),
+            "pythonPostJobCallback": TextDef(
+                "pythonPostJobCallback",
+                label="Python Post Job Callback",
+                default=cls.pythonPostJobCallback,
+                tooltip=(
+                    "When the translation has finished the string specified "
+                    "is evaluated as a python command."
+                )
+            )
+        })
+
+        defs = super(ExtractAlembic, cls).get_attribute_defs()
+
+        defs.extend([
+            UISeparatorDef("sep_alembic_options"),
+            UILabelDef("Alembic Options"),
+        ])
+
+        # Only expose the attribute definitions that the settings allow
+        # the publisher to override
+        overrides = set(cls.overrides)
+        for key, value in override_defs.items():
+            if key not in overrides:
+                continue
+
+            defs.append(value)
+
+        defs.append(
+            UISeparatorDef("sep_alembic_options_end")
+        )
+
+        return defs
+

 class ExtractAnimation(ExtractAlembic):
-    label = "Extract Animation"
+    label = "Extract Animation (Alembic)"
     families = ["animation"]

     def get_members_and_roots(self, instance):
-        # Collect the out set nodes
         out_sets = [node for node in instance if node.endswith("out_SET")]
         if len(out_sets) != 1:
-            raise RuntimeError("Couldn't find exactly one out_SET: "
-                               "{0}".format(out_sets))
+            raise KnownPublishError(
+                "Couldn't find exactly one out_SET: {0}".format(out_sets)
+            )
         out_set = out_sets[0]
-        roots = cmds.sets(out_set, query=True)
+        roots = cmds.sets(out_set, query=True) or []

         # Include all descendants
-        nodes = roots + cmds.listRelatives(roots,
-                                           allDescendents=True,
-                                           fullPath=True) or []
+        nodes = roots.copy()
+        nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))

         return nodes, roots
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
index 3637a58614..5aefdfc33a 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_proxy_abc.py
@@ -3,8 +3,8 @@ import os
 from maya import cmds

 from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
-    extract_alembic,
     suspended_refresh,
     maintained_selection,
     iter_visible_nodes_in_range
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
index 1a389f3d33..b5cc7745a1 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_unreal_skeletalmesh_abc.py
@@ -5,8 +5,8 @@ import os
 from maya import cmds  # noqa

 from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
-    extract_alembic,
     suspended_refresh,
     maintained_selection
 )
diff --git a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
index d799486184..54d295b479 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/extract_workfile_xgen.py
@@ -5,7 +5,7 @@ import copy
 from maya import cmds
 import pyblish.api

-from ayon_core.hosts.maya.api.lib import extract_alembic
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.pipeline import publish

diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
new file mode 100644
index 
0000000000..11f4c313fa
--- /dev/null
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_alembic_options_defaults.py
@@ -0,0 +1,130 @@
+import inspect
+import pyblish.api
+
+from ayon_core.pipeline import OptionalPyblishPluginMixin
+from ayon_core.pipeline.publish import RepairAction, PublishValidationError
+
+
+class ValidateAlembicDefaultsPointcache(
+    pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
+):
+    """Validate the attributes on the instance are defaults.
+
+    The defaults are defined in the project settings.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["pointcache"]
+    hosts = ["maya"]
+    label = "Validate Alembic Options Defaults"
+    actions = [RepairAction]
+    optional = True
+
+    plugin_name = "ExtractAlembic"
+
+    @classmethod
+    def _get_settings(cls, context):
+        maya_settings = context.data["project_settings"]["maya"]
+        settings = maya_settings["publish"]["ExtractAlembic"]
+        return settings
+
+    @classmethod
+    def _get_publish_attributes(cls, instance):
+        return instance.data["publish_attributes"][cls.plugin_name]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
+        settings = self._get_settings(instance.context)
+        attributes = self._get_publish_attributes(instance)
+
+        invalid = {}
+        for key, value in attributes.items():
+            if key not in settings:
+                # This may occur if attributes have changed over time and an
+                # existing instance has older legacy attributes that do not
+                # match the current settings definition.
+                self.log.warning(
+                    "Publish attribute %s not found in Alembic Export "
+                    "default settings. Ignoring validation for attribute.",
+                    key
+                )
+                continue
+
+            default_value = settings[key]
+
+            # Lists are best compared sorted, since we can't rely on the
+            # order of the items.
+            if isinstance(value, list):
+                value = sorted(value)
+                default_value = sorted(default_value)
+
+            if value != default_value:
+                invalid[key] = value, default_value
+
+        if invalid:
+            non_defaults = "\n".join(
+                f"- {key}: {value} \t(default: {default_value})"
+                for key, (value, default_value) in invalid.items()
+            )
+
+            raise PublishValidationError(
+                "Alembic extract options differ from default values:\n"
+                f"{non_defaults}",
+                description=self.get_description()
+            )
+
+    @staticmethod
+    def get_description():
+        return inspect.cleandoc(
+            """### Alembic Extract settings differ from defaults
+
+            The alembic export options differ from the project default values.
+
+            If this is intentional, you can disable this validation by
+            disabling **Validate Alembic Options Defaults**.
+
+            If not, you may use the "Repair" action to revert all the options
+            to their default values.
+
+            """
+        )
+
+    @classmethod
+    def repair(cls, instance):
+        # Find the create instance twin.
+        create_context = instance.context.data["create_context"]
+        create_instance = create_context.get_instance_by_id(
+            instance.data["instance_id"]
+        )
+
+        # Set the settings values on the create context, then save them to
+        # the workfile.
+        settings = cls._get_settings(instance.context)
+        attributes = cls._get_publish_attributes(create_instance)
+        for key in attributes:
+            if key not in settings:
+                # This may occur if attributes have changed over time and an
+                # existing instance has older legacy attributes that do not
+                # match the current settings definition.
+                cls.log.warning(
+                    "Publish attribute %s not found in Alembic Export "
+                    "default settings. 
Ignoring repair for attribute.", + key + ) + continue + attributes[key] = settings[key] + + create_context.save_changes() + + +class ValidateAlembicDefaultsAnimation( + ValidateAlembicDefaultsPointcache +): + """Validate the attributes on the instance are defaults. + + The defaults are defined in the project settings. + """ + label = "Validate Alembic Options Defaults" + families = ["animation"] + plugin_name = "ExtractAnimation" diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py index 92b4922492..8574b3ecc8 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -1,30 +1,56 @@ +from maya import cmds + import pyblish.api + from ayon_core.pipeline.publish import ( ValidateContentsOrder, PublishValidationError ) +from ayon_core.hosts.maya.api.lib import is_visible class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): """Validate Arnold Scene Source. - We require at least 1 root node/parent for the meshes. This is to ensure we - can duplicate the nodes and preserve the names. + Ensure no nodes are hidden. + """ - If using proxies we need the nodes to share the same names and not be + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass", "assProxy"] + label = "Validate Arnold Scene Source" + + def process(self, instance): + # Validate against having nodes hidden, which will result in the + # extraction to ignore the node. + nodes = instance.data["members"] + instance.data.get("proxy", []) + nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')] + hidden_nodes = [ + x for x in nodes if not is_visible(x, intermediateObject=False) + ] + if hidden_nodes: + raise PublishValidationError( + "Found hidden nodes:\n\n{}\n\nPlease unhide for" + " publishing.".format("\n".join(hidden_nodes)) + ) + + +class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source Proxy. + + When using proxies we need the nodes to share the same names and not be parent to the world. This ends up needing at least two groups with content nodes and proxy nodes in another. """ order = ValidateContentsOrder hosts = ["maya"] - families = ["ass"] - label = "Validate Arnold Scene Source" + families = ["assProxy"] + label = "Validate Arnold Scene Source Proxy" def _get_nodes_by_name(self, nodes): ungrouped_nodes = [] nodes_by_name = {} parents = [] - same_named_nodes = {} for node in nodes: node_split = node.split("|") if len(node_split) == 2: @@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): parents.append(parent) node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] - - # Check for same same nodes, which can happen in different - # hierarchies. - if node_name in nodes_by_name: - try: - same_named_nodes[node_name].append(node) - except KeyError: - same_named_nodes[node_name] = [ - nodes_by_name[node_name], node - ] - nodes_by_name[node_name] = node - if same_named_nodes: - message = "Found nodes with the same name:" - for name, nodes in same_named_nodes.items(): - message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes)) - - raise PublishValidationError(message) - return ungrouped_nodes, nodes_by_name, parents def process(self, instance): + # Validate against nodes directly parented to world. 
ungrouped_nodes = []
         nodes, content_nodes_by_name, content_parents = (
-            self._get_nodes_by_name(instance.data["contentMembers"])
+            self._get_nodes_by_name(instance.data["members"])
         )
         ungrouped_nodes.extend(nodes)
@@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
         )
         ungrouped_nodes.extend(nodes)

-        # Validate against nodes directly parented to world.
         if ungrouped_nodes:
             raise PublishValidationError(
                 "Found nodes parented to the world: {}\n"
                 "All nodes need to be grouped.".format(ungrouped_nodes)
             )

-        # Proxy validation.
-        if not instance.data.get("proxy", []):
-            return
-
         # Validate for content and proxy nodes amount being the same.
-        if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
+        if len(instance.data["members"]) != len(instance.data["proxy"]):
             raise PublishValidationError(
-                "Amount of content nodes ({}) and proxy nodes ({}) needs to "
-                "be the same.".format(
-                    len(instance.data["contentMembers"]),
-                    len(instance.data["proxy"])
+                "The number of content nodes ({}) and proxy nodes ({}) must "
+                "be the same.\nContent nodes: {}\nProxy nodes: {}".format(
+                    len(instance.data["members"]),
+                    len(instance.data["proxy"]),
+                    instance.data["members"],
+                    instance.data["proxy"]
                 )
             )
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
index a9d896952d..e5dbe178fc 100644
--- a/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
+++ b/client/ayon_core/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
@@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,

     order = ValidateContentsOrder
     hosts = ["maya"]
-    families = ["ass"]
+    families = ["assProxy"]
     label = "Validate Arnold Scene Source CBID"
     actions = [RepairAction]
     optional = False
@@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,

     @classmethod
     def get_invalid_couples(cls, instance):
-        content_nodes_by_name = cls._get_nodes_by_name(
-            instance.data["contentMembers"]
-        )
-        proxy_nodes_by_name = cls._get_nodes_by_name(
-            instance.data.get("proxy", [])
-        )
+        nodes_by_name = cls._get_nodes_by_name(instance.data["members"])
+        proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"])

         invalid_couples = []
-        for content_name, content_node in content_nodes_by_name.items():
+        for content_name, content_node in nodes_by_name.items():
             proxy_node = proxy_nodes_by_name.get(content_name, None)

             if not proxy_node:
@@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
         if not self.is_active(instance.data):
             return
         # Proxy validation.
-        if not instance.data.get("proxy", []):
+        if not instance.data["proxy"]:
             return

         # Validate for proxy nodes sharing the same cbId as content nodes. 
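The validator above compares each publish attribute against the project-settings defaults, treating lists as order-insensitive. A minimal, dependency-free sketch of that comparison rule (the function name and sample data here are hypothetical, not part of ayon_core):

```python
# Hypothetical sketch of the comparison rule used by
# ValidateAlembicDefaultsPointcache; not the plugin itself.
def find_non_defaults(attributes, defaults):
    """Return {key: (value, default)} for values differing from defaults."""
    invalid = {}
    for key, value in attributes.items():
        if key not in defaults:
            # Legacy attribute no longer present in settings: skip it,
            # mirroring the plugin's warn-and-continue behavior.
            continue
        default = defaults[key]
        if isinstance(value, list):
            # Item order is not reliable, so compare sorted copies.
            value, default = sorted(value), sorted(default)
        if value != default:
            invalid[key] = (value, default)
    return invalid


if __name__ == "__main__":
    print(find_non_defaults(
        {"writeNormals": False, "attr": ["b", "a"]},
        {"writeNormals": True, "attr": ["a", "b"]},
    ))  # -> {'writeNormals': (False, True)}
```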
diff --git a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py index 78a247b3f2..7badfdc027 100644 --- a/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/client/ayon_core/hosts/maya/plugins/publish/validate_rendersettings.py @@ -10,6 +10,7 @@ from ayon_core.pipeline.publish import ( RepairAction, ValidateContentsOrder, PublishValidationError, + OptionalPyblishPluginMixin ) from ayon_core.hosts.maya.api import lib from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings @@ -37,7 +38,8 @@ def get_redshift_image_format_labels(): return mel.eval("{0}={0}".format(var)) -class ValidateRenderSettings(pyblish.api.InstancePlugin): +class ValidateRenderSettings(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates the global render settings * File Name Prefix must start with: `` @@ -55,7 +57,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): * Frame Padding must be: * default: 4 - * Animation must be toggle on, in Render Settings - Common tab: + * Animation must be toggled on, in Render Settings - Common tab: * vray: Animation on standard of specific * arnold: Frame / Animation ext: Any choice without "(Single Frame)" * redshift: Animation toggled on @@ -67,10 +69,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): """ order = ValidateContentsOrder - label = "Render Settings" + label = "Validate Render Settings" hosts = ["maya"] families = ["renderlayer"] actions = [RepairAction] + optional = True ImagePrefixes = { 'mentalray': 'defaultRenderGlobals.imageFilePrefix', @@ -112,6 +115,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): DEFAULT_PREFIX = "//_" def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..b07c7e9a70 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,132 @@ +from maya import cmds + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + PlaceholderLoadMixin, + LoadPlaceholderItem +) +from ayon_core.hosts.maya.api.lib import ( + get_container_transforms, + get_node_parent, + get_node_index_under_parent +) +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin, +) + + +class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin): + identifier = "maya.load" + label = "Maya load" + + item_class = LoadPlaceholderItem + + def _create_placeholder_name(self, placeholder_data): + + # Split builder type: context_assets, linked_assets, all_assets + prefix, suffix = placeholder_data["builder_type"].split("_", 1) + parts = [prefix] + + # add family if any + placeholder_product_type = placeholder_data.get("product_type") + if placeholder_product_type is None: + placeholder_product_type = placeholder_data.get("family") + + if placeholder_product_type: + parts.append(placeholder_product_type) + + # add loader arguments if any + loader_args = placeholder_data["loader_args"] + if loader_args: + loader_args = eval(loader_args) + for value in loader_args.values(): + parts.append(str(value)) + + parts.append(suffix) + placeholder_name = "_".join(parts) + + return placeholder_name.capitalize() + + def _get_loaded_repre_ids(self): + 
loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + try: + containers = cmds.sets("AVALON_CONTAINERS", q=True) + except ValueError: + containers = [] + + loaded_representation_ids = { + cmds.getAttr(container + ".representation") + for container in containers + } + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def populate_placeholder(self, placeholder): + self.populate_load_placeholder(placeholder) + + def repopulate_placeholder(self, placeholder): + repre_ids = self._get_loaded_repre_ids() + self.populate_load_placeholder(placeholder, repre_ids) + + def get_placeholder_options(self, options=None): + return self.get_load_plugin_options(options) + + def load_succeed(self, placeholder, container): + self._parent_in_hierarchy(placeholder, container) + + def _parent_in_hierarchy(self, placeholder, container): + """Parent loaded container to placeholder's parent. + + ie : Set loaded content as placeholder's sibling + + Args: + container (str): Placeholder loaded containers + """ + + if not container: + return + + # TODO: This currently returns only a single root but a loaded scene + # could technically load more than a single root + container_root = get_container_transforms(container, root=True) + + # Bugfix: The get_container_transforms does not recognize the load + # reference group currently + # TODO: Remove this when it does + parent = get_node_parent(container_root) + if parent: + container_root = parent + roots = [container_root] + + # Add the loaded roots to the holding sets if they exist + holding_sets = cmds.listSets(object=placeholder.scene_identifier) or [] + for holding_set in holding_sets: + cmds.sets(roots, forceElement=holding_set) + + # Parent the roots to the place of the placeholder locator and match + # its matrix + placeholder_form = cmds.xform( + placeholder.scene_identifier, + query=True, + matrix=True, + worldSpace=True + ) + scene_parent = get_node_parent(placeholder.scene_identifier) + for node in set(roots): + cmds.xform(node, matrix=placeholder_form, worldSpace=True) + + if scene_parent != get_node_parent(node): + if scene_parent: + node = cmds.parent(node, scene_parent)[0] + else: + node = cmds.parent(node, world=True)[0] + + # Move loaded nodes in index order next to their placeholder node + cmds.reorder(node, back=True) + index = get_node_index_under_parent(placeholder.scene_identifier) + cmds.reorder(node, front=True) + cmds.reorder(node, relative=index + 1) diff --git a/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py new file mode 100644 index 0000000000..62e10ba023 --- /dev/null +++ b/client/ayon_core/hosts/maya/plugins/workfile_build/script_placeholder.py @@ -0,0 +1,201 @@ +from maya import cmds + +from ayon_core.hosts.maya.api.workfile_template_builder import ( + MayaPlaceholderPlugin +) +from ayon_core.lib import NumberDef, TextDef, EnumDef +from ayon_core.lib.events import weakref_partial + + +EXAMPLE_SCRIPT = """ +# Access maya commands +from maya import cmds + +# Access the placeholder node +placeholder_node = placeholder.scene_identifier + +# Access the event callback +if event is None: + print(f"Populating {placeholder}") +else: + if event.topic == "template.depth_processed": + print(f"Processed depth: {event.get('depth')}") + elif event.topic == "template.finished": + 
print("Build finished.") +""".strip() + + +class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin): + """Execute a script at the given `order` during workfile build. + + This is a very low-level placeholder to run Python scripts at a given + point in time during the workfile template build. + + It can create either a locator or an objectSet as placeholder node. + It defaults to an objectSet, since allowing to run on e.g. other + placeholder node members can be useful, e.g. using: + + >>> members = cmds.sets(placeholder.scene_identifier, query=True) + + """ + + identifier = "maya.runscript" + label = "Run Python Script" + + use_selection_as_parent = False + + def get_placeholder_options(self, options=None): + options = options or {} + return [ + NumberDef( + "order", + label="Order", + default=options.get("order") or 0, + decimals=0, + minimum=0, + maximum=999, + tooltip=( + "Order" + "\nOrder defines asset loading priority (0 to 999)" + "\nPriority rule is : \"lowest is first to load\"." + ) + ), + TextDef( + "prepare_script", + label="Run at\nprepare", + tooltip="Run before populate at prepare order", + multiline=True, + default=options.get("prepare_script", "") + ), + TextDef( + "populate_script", + label="Run at\npopulate", + tooltip="Run script at populate node order
" + "This is the default behavior", + multiline=True, + default=options.get("populate_script", EXAMPLE_SCRIPT) + ), + TextDef( + "depth_processed_script", + label="Run after\ndepth\niteration", + tooltip="Run script after every build depth iteration", + multiline=True, + default=options.get("depth_processed_script", "") + ), + TextDef( + "finished_script", + label="Run after\nbuild", + tooltip=( + "Run script at build finished.
" + "Note: this even runs if other placeholders had " + "errors during the build" + ), + multiline=True, + default=options.get("finished_script", "") + ), + EnumDef( + "create_nodetype", + label="Nodetype", + items={ + "spaceLocator": "Locator", + "objectSet": "ObjectSet" + }, + tooltip=( + "The placeholder's node type to be created.
" + "Note this only works on create, not on update" + ), + default=options.get("create_nodetype", "objectSet") + ), + ] + + def create_placeholder(self, placeholder_data): + nodetype = placeholder_data.get("create_nodetype", "objectSet") + + if nodetype == "spaceLocator": + super(MayaPlaceholderScriptPlugin, self).create_placeholder( + placeholder_data + ) + elif nodetype == "objectSet": + placeholder_data["plugin_identifier"] = self.identifier + + # Create maya objectSet on selection + selection = cmds.ls(selection=True, long=True) + name = self._create_placeholder_name(placeholder_data) + node = cmds.sets(selection, name=name) + + self.imprint(node, placeholder_data) + + def prepare_placeholders(self, placeholders): + super(MayaPlaceholderScriptPlugin, self).prepare_placeholders( + placeholders + ) + for placeholder in placeholders: + prepare_script = placeholder.data.get("prepare_script") + if not prepare_script: + continue + + self.run_script(placeholder, prepare_script) + + def populate_placeholder(self, placeholder): + + populate_script = placeholder.data.get("populate_script") + depth_script = placeholder.data.get("depth_processed_script") + finished_script = placeholder.data.get("finished_script") + + # Run now + if populate_script: + self.run_script(placeholder, populate_script) + + if not any([depth_script, finished_script]): + # No callback scripts to run + if not placeholder.data.get("keep_placeholder", True): + self.delete_placeholder(placeholder) + return + + # Run at each depth processed + if depth_script: + callback = weakref_partial( + self.run_script, placeholder, depth_script) + self.builder.add_on_depth_processed_callback( + callback, order=placeholder.order) + + # Run at build finish + if finished_script: + callback = weakref_partial( + self.run_script, placeholder, finished_script) + self.builder.add_on_finished_callback( + callback, order=placeholder.order) + + # If placeholder should be deleted, delete it after finish so + # the scripts have access to it up to the last run + if not placeholder.data.get("keep_placeholder", True): + delete_callback = weakref_partial( + self.delete_placeholder, placeholder) + self.builder.add_on_finished_callback( + delete_callback, order=placeholder.order + 1) + + def run_script(self, placeholder, script, event=None): + """Run script + + Even though `placeholder` is an unused arguments by exposing it as + an input argument it means it makes it available through + globals()/locals() in the `exec` call, giving the script access + to the placeholder. + + For example: + >>> node = placeholder.scene_identifier + + In the case the script is running at a callback level (not during + populate) then it has access to the `event` as well, otherwise the + value is None if it runs during `populate_placeholder` directly. 
+ + For example adding this as the callback script: + >>> if event is not None: + >>> if event.topic == "on_depth_processed": + >>> print(f"Processed depth: {event.get('depth')}") + >>> elif event.topic == "on_finished": + >>> print("Build finished.") + + """ + self.log.debug(f"Running script at event: {event}") + exec(script, locals()) diff --git a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py index 88ef4b201a..c1d9f019e4 100644 --- a/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py +++ b/client/ayon_core/hosts/maya/tools/mayalookassigner/vray_proxies.py @@ -7,7 +7,7 @@ from maya import cmds import ayon_api from ayon_core.pipeline import get_current_project_name -import ayon_core.hosts.maya.lib as maya_lib +import ayon_core.hosts.maya.api.lib as maya_lib from . import lib from .alembic import get_alembic_ids_cache diff --git a/client/ayon_core/hosts/nuke/api/lib.py b/client/ayon_core/hosts/nuke/api/lib.py index 78cbe85097..e3505a16f2 100644 --- a/client/ayon_core/hosts/nuke/api/lib.py +++ b/client/ayon_core/hosts/nuke/api/lib.py @@ -1495,18 +1495,28 @@ class WorkfileSettings(object): filter_knobs = [ "viewerProcess", - "wipe_position" + "wipe_position", + "monitorOutOutputTransform" ] + display, viewer = get_viewer_config_from_string( + viewer_dict["viewerProcess"] + ) + viewer_process = create_viewer_profile_string( + viewer, display, path_like=False + ) + display, viewer = get_viewer_config_from_string( + viewer_dict["output_transform"] + ) + output_transform = create_viewer_profile_string( + viewer, display, path_like=False + ) erased_viewers = [] for v in nuke.allNodes(filter="Viewer"): # set viewProcess to preset from settings - v["viewerProcess"].setValue( - str(viewer_dict["viewerProcess"]) - ) + v["viewerProcess"].setValue(viewer_process) - if str(viewer_dict["viewerProcess"]) \ - not in v["viewerProcess"].value(): + if viewer_process not in v["viewerProcess"].value(): copy_inputs = v.dependencies() copy_knobs = {k: v[k].value() for k in v.knobs() if k not in filter_knobs} @@ -1524,11 +1534,11 @@ class WorkfileSettings(object): # set copied knobs for k, v in copy_knobs.items(): - print(k, v) nv[k].setValue(v) # set viewerProcess - nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"])) + nv["viewerProcess"].setValue(viewer_process) + nv["monitorOutOutputTransform"].setValue(output_transform) if erased_viewers: log.warning( @@ -1547,7 +1557,6 @@ class WorkfileSettings(object): host_name="nuke" ) - viewer_process_settings = imageio_host["viewer"]["viewerProcess"] workfile_settings = imageio_host["workfile"] color_management = workfile_settings["color_management"] native_ocio_config = workfile_settings["native_ocio_config"] @@ -1574,29 +1583,6 @@ class WorkfileSettings(object): residual_path )) - # get monitor lut from settings respecting Nuke version differences - monitor_lut = workfile_settings["thumbnail_space"] - monitor_lut_data = self._get_monitor_settings( - viewer_process_settings, monitor_lut - ) - monitor_lut_data["workingSpaceLUT"] = ( - workfile_settings["working_space"] - ) - - # then set the rest - for knob, value_ in monitor_lut_data.items(): - # skip unfilled ocio config path - # it will be dict in value - if isinstance(value_, dict): - continue - # skip empty values - if not value_: - continue - if self._root_node[knob].value() not in value_: - self._root_node[knob].setValue(str(value_)) - log.debug("nuke.root()['{}'] changed to: {}".format( - knob, 
value_)) - # set ocio config path if config_data: config_path = config_data["path"].replace("\\", "/") @@ -1611,6 +1597,31 @@ class WorkfileSettings(object): if correct_settings: self._set_ocio_config_path_to_workfile(config_data) + # get monitor lut from settings respecting Nuke version differences + monitor_lut_data = self._get_monitor_settings( + workfile_settings["monitor_out_lut"], + workfile_settings["monitor_lut"] + ) + monitor_lut_data.update({ + "workingSpaceLUT": workfile_settings["working_space"], + "int8Lut": workfile_settings["int_8_lut"], + "int16Lut": workfile_settings["int_16_lut"], + "logLut": workfile_settings["log_lut"], + "floatLut": workfile_settings["float_lut"] + }) + + # then set the rest + for knob, value_ in monitor_lut_data.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value_, dict): + continue + # skip empty values + if not value_: + continue + self._root_node[knob].setValue(str(value_)) + log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_)) + def _get_monitor_settings(self, viewer_lut, monitor_lut): """ Get monitor settings from viewer and monitor lut diff --git a/client/ayon_core/hosts/nuke/api/pipeline.py b/client/ayon_core/hosts/nuke/api/pipeline.py index 0d44aba2f9..d35a2e89e0 100644 --- a/client/ayon_core/hosts/nuke/api/pipeline.py +++ b/client/ayon_core/hosts/nuke/api/pipeline.py @@ -18,6 +18,7 @@ from ayon_core.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, register_inventory_action_path, + register_workfile_build_plugin_path, AYON_INSTANCE_ID, AVALON_INSTANCE_ID, AVALON_CONTAINER_ID, @@ -52,8 +53,6 @@ from .lib import ( MENU_LABEL, ) from .workfile_template_builder import ( - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin, build_workfile_template, create_placeholder, update_placeholder, @@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build") # registering pyblish gui regarding settings in presets if os.getenv("PYBLISH_GUI", None): @@ -105,18 +105,11 @@ class NukeHost( def get_workfile_extensions(self): return file_extensions() - def get_workfile_build_placeholder_plugins(self): - return [ - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin - ] - def get_containers(self): return ls() def install(self): - ''' Installing all requarements for Nuke host - ''' + """Installing all requirements for Nuke host""" pyblish.api.register_host("nuke") @@ -125,6 +118,7 @@ class NukeHost( register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) register_inventory_action_path(INVENTORY_PATH) + register_workfile_build_plugin_path(WORKFILE_BUILD_PATH) # Register AYON event for workfiles loading. register_event_callback("workio.open_file", check_inventory_versions) @@ -178,7 +172,6 @@ def add_nuke_callbacks(): # set apply all workfile settings on script load and save nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) - if nuke_settings["dirmap"]["enabled"]: log.info("Added Nuke's dir-mapping callback ...") # Add dirmap for file paths. 
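The `WorkfileSettings` hunk above round-trips viewer profile strings through `get_viewer_config_from_string` and `create_viewer_profile_string`. A rough standalone sketch of that round-trip, assuming the "viewer (display)" profile convention; these stand-ins are illustrative only, not the actual ayon_core implementations:

```python
# Illustrative stand-ins; the real helpers live in
# ayon_core.hosts.nuke.api.lib and may differ in detail.
def get_viewer_config_from_string(value):
    """Split 'sRGB (ACES)' into ('ACES', 'sRGB'); plain 'sRGB' has no display."""
    if value.endswith(")") and " (" in value:
        viewer, _, display = value.partition(" (")
        return display[:-1], viewer
    return None, value


def create_viewer_profile_string(viewer, display=None, path_like=False):
    """Rebuild a profile string as 'display/viewer' or 'viewer (display)'."""
    if not display:
        return viewer
    if path_like:
        return "{}/{}".format(display, viewer)
    return "{} ({})".format(viewer, display)


if __name__ == "__main__":
    display, viewer = get_viewer_config_from_string("sRGB (ACES)")
    assert (display, viewer) == ("ACES", "sRGB")
    assert create_viewer_profile_string(viewer, display) == "sRGB (ACES)"
```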
diff --git a/client/ayon_core/hosts/nuke/api/plugin.py b/client/ayon_core/hosts/nuke/api/plugin.py index 5b97fab0c2..fb56dec833 100644 --- a/client/ayon_core/hosts/nuke/api/plugin.py +++ b/client/ayon_core/hosts/nuke/api/plugin.py @@ -1151,7 +1151,6 @@ def _remove_old_knobs(node): "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" ] - print(node.name()) # remove all old knobs for knob in node.allKnobs(): diff --git a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py index 495edd9e5f..aebf91c4a4 100644 --- a/client/ayon_core/hosts/nuke/api/workfile_template_builder.py +++ b/client/ayon_core/hosts/nuke/api/workfile_template_builder.py @@ -1,30 +1,17 @@ import collections import nuke + from ayon_core.pipeline import registered_host from ayon_core.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, PlaceholderPlugin, - LoadPlaceholderItem, - CreatePlaceholderItem, - PlaceholderLoadMixin, - PlaceholderCreateMixin, ) from ayon_core.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) from .lib import ( - find_free_space_to_paste_nodes, - get_extreme_positions, - get_group_io_nodes, imprint, - refresh_node, - refresh_nodes, reset_selection, - get_names_from_nodes, - get_nodes_by_names, - select_nodes, - duplicate_node, - node_tempfile, get_main_window, WorkfileSettings, ) @@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder): return True + class NukePlaceholderPlugin(PlaceholderPlugin): node_color = 4278190335 @@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin): nuke.delete(placeholder_node) -class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): - identifier = "nuke.load" - label = "Nuke load" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderLoadPlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _get_loaded_repre_ids(self): - loaded_representation_ids = self.builder.get_shared_populate_data( - "loaded_representation_ids" - ) - if loaded_representation_ids is None: - loaded_representation_ids = set() - for node in nuke.allNodes(): - if "repre_id" in node.knobs(): - loaded_representation_ids.add( - node.knob("repre_id").getValue() - ) - - self.builder.set_shared_populate_data( - "loaded_representation_ids", loaded_representation_ids - ) - return loaded_representation_ids - - def _before_placeholder_load(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def _before_repre_load(self, placeholder, representation): - placeholder.data["last_repre_id"] = representation["id"] - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or 
plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - # TODO do data validations and maybe updgrades if are invalid - output.append( - LoadPlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_load_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - repre_ids = self._get_loaded_repre_ids() - self.populate_load_placeholder(placeholder, repre_ids) - - def get_placeholder_options(self, options=None): - return self.get_load_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. - """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - # TODO get from shared populate data! - nodes_init = placeholder.data["nodes_init"] - nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Loaded nodes: {}".format(nodes_loaded)) - if not nodes_loaded: - return - - placeholder.data["delete"] = True - - nodes_loaded = self._move_to_placeholder_group( - placeholder, nodes_loaded - ) - placeholder.data["last_loaded"] = nodes_loaded - refresh_nodes(nodes_loaded) - - # positioning of the loaded nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_loaded) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_loaded) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of loaded nodes - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) - - self._set_loaded_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_loaded) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_loaded, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the loaded - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_loaded, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_loaded: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - 
node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_loaded): - """ - opening the placeholder's group and copying loaded nodes in it. - - Returns : - nodes_loaded (list): the new list of pasted nodes - """ - - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_loaded) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_loaded = nuke.selectedNodes() - return nodes_loaded - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is loaded.""" - - nodes_loaded = placeholder.data["last_loaded"] - loaded_backdrops = [] - bd_orders = set() - for node in nodes_loaded: - if isinstance(node, nuke.BackdropNode): - loaded_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in loaded_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes loaded with it) - - add Id to the attributes of all the other nodes - """ - - loaded_nodes = placeholder.data["last_loaded"] - loaded_nodes_set = set(loaded_nodes) - data = {"repre_id": str(placeholder.data["last_repre_id"])} - - for node in loaded_nodes: - node_knobs = node.knobs() - if "builder_type" not in node_knobs: - # save the id of representation for all imported nodes - imprint(node, data) - node.knob("repre_id").setVisible(False) - refresh_node(node) - continue - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(loaded_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_loaded_connections(self, placeholder): - """ - set inputs and outputs of loaded nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - loaded with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_loaded"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - -class NukePlaceholderCreatePlugin( - NukePlaceholderPlugin, PlaceholderCreateMixin -): - identifier = "nuke.create" - label = "Nuke create" - - def _parse_placeholder_node_data(self, node): - placeholder_data = super( - NukePlaceholderCreatePlugin, self - )._parse_placeholder_node_data(node) - - node_knobs = node.knobs() - nb_children = 0 - if "nb_children" in node_knobs: - nb_children = int(node_knobs["nb_children"].getValue()) - placeholder_data["nb_children"] = nb_children - - siblings = [] - if "siblings" in node_knobs: - siblings = node_knobs["siblings"].values() - placeholder_data["siblings"] = siblings - - node_full_name = node.fullName() - placeholder_data["group_name"] = node_full_name.rpartition(".")[0] - placeholder_data["last_loaded"] = [] - placeholder_data["delete"] = False - return placeholder_data - - def _before_instance_create(self, placeholder): - placeholder.data["nodes_init"] = nuke.allNodes() - - def collect_placeholders(self): - output = [] - scene_placeholders = self._collect_scene_placeholders() - for node_name, node in scene_placeholders.items(): - plugin_identifier_knob = node.knob("plugin_identifier") - if ( - plugin_identifier_knob is None - or plugin_identifier_knob.getValue() != self.identifier - ): - continue - - placeholder_data = self._parse_placeholder_node_data(node) - - output.append( - CreatePlaceholderItem(node_name, placeholder_data, self) - ) - - return output - - def populate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def repopulate_placeholder(self, placeholder): - self.populate_create_placeholder(placeholder) - - def get_placeholder_options(self, options=None): - return self.get_create_plugin_options(options) - - def post_placeholder_process(self, placeholder, failed): - """Cleanup placeholder after load of its corresponding representations. - - Args: - placeholder (PlaceholderItem): Item which was just used to load - representation. - failed (bool): Loading of representation failed. 
- """ - # deselect all selected nodes - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - # getting the latest nodes added - nodes_init = placeholder.data["nodes_init"] - nodes_created = list(set(nuke.allNodes()) - set(nodes_init)) - self.log.debug("Created nodes: {}".format(nodes_created)) - if not nodes_created: - return - - placeholder.data["delete"] = True - - nodes_created = self._move_to_placeholder_group( - placeholder, nodes_created - ) - placeholder.data["last_created"] = nodes_created - refresh_nodes(nodes_created) - - # positioning of the created nodes - min_x, min_y, _, _ = get_extreme_positions(nodes_created) - for node in nodes_created: - xpos = (node.xpos() - min_x) + placeholder_node.xpos() - ypos = (node.ypos() - min_y) + placeholder_node.ypos() - node.setXYpos(xpos, ypos) - refresh_nodes(nodes_created) - - # fix the problem of z_order for backdrops - self._fix_z_order(placeholder) - - if placeholder.data.get("keep_placeholder"): - self._imprint_siblings(placeholder) - - if placeholder.data["nb_children"] == 0: - # save initial nodes positions and dimensions, update them - # and set inputs and outputs of created nodes - - if placeholder.data.get("keep_placeholder"): - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_created) - - self._set_created_connections(placeholder) - - elif placeholder.data["siblings"]: - # create copies of placeholder siblings for the new created nodes, - # set their inputs and outputs and update all nodes positions and - # dimensions and siblings names - - siblings = get_nodes_by_names(placeholder.data["siblings"]) - refresh_nodes(siblings) - copies = self._create_sib_copies(placeholder) - new_nodes = list(copies.values()) # copies nodes - self._update_nodes(new_nodes, nodes_created) - placeholder_node.removeKnob(placeholder_node.knob("siblings")) - new_nodes_name = get_names_from_nodes(new_nodes) - imprint(placeholder_node, {"siblings": new_nodes_name}) - self._set_copies_connections(placeholder, copies) - - self._update_nodes( - nuke.allNodes(), - new_nodes + nodes_created, - 20 - ) - - new_siblings = get_names_from_nodes(new_nodes) - placeholder.data["siblings"] = new_siblings - - else: - # if the placeholder doesn't have siblings, the created - # nodes will be placed in a free space - - xpointer, ypointer = find_free_space_to_paste_nodes( - nodes_created, direction="bottom", offset=200 - ) - node = nuke.createNode("NoOp") - reset_selection() - nuke.delete(node) - for node in nodes_created: - xpos = (node.xpos() - min_x) + xpointer - ypos = (node.ypos() - min_y) + ypointer - node.setXYpos(xpos, ypos) - - placeholder.data["nb_children"] += 1 - reset_selection() - - # go back to root group - nuke.root().begin() - - def _move_to_placeholder_group(self, placeholder, nodes_created): - """ - opening the placeholder's group and copying created nodes in it. 
- - Returns : - nodes_created (list): the new list of pasted nodes - """ - groups_name = placeholder.data["group_name"] - reset_selection() - select_nodes(nodes_created) - if groups_name: - with node_tempfile() as filepath: - nuke.nodeCopy(filepath) - for node in nuke.selectedNodes(): - nuke.delete(node) - group = nuke.toNode(groups_name) - group.begin() - nuke.nodePaste(filepath) - nodes_created = nuke.selectedNodes() - return nodes_created - - def _fix_z_order(self, placeholder): - """Fix the problem of z_order when a backdrop is create.""" - - nodes_created = placeholder.data["last_created"] - created_backdrops = [] - bd_orders = set() - for node in nodes_created: - if isinstance(node, nuke.BackdropNode): - created_backdrops.append(node) - bd_orders.add(node.knob("z_order").getValue()) - - if not bd_orders: - return - - sib_orders = set() - for node_name in placeholder.data["siblings"]: - node = nuke.toNode(node_name) - if isinstance(node, nuke.BackdropNode): - sib_orders.add(node.knob("z_order").getValue()) - - if not sib_orders: - return - - min_order = min(bd_orders) - max_order = max(sib_orders) - for backdrop_node in created_backdrops: - z_order = backdrop_node.knob("z_order").getValue() - backdrop_node.knob("z_order").setValue( - z_order + max_order - min_order + 1) - - def _imprint_siblings(self, placeholder): - """ - - add siblings names to placeholder attributes (nodes created with it) - - add Id to the attributes of all the other nodes - """ - - created_nodes = placeholder.data["last_created"] - created_nodes_set = set(created_nodes) - - for node in created_nodes: - node_knobs = node.knobs() - - if ( - "is_placeholder" not in node_knobs - or ( - "is_placeholder" in node_knobs - and node.knob("is_placeholder").value() - ) - ): - siblings = list(created_nodes_set - {node}) - siblings_name = get_names_from_nodes(siblings) - siblings = {"siblings": siblings_name} - imprint(node, siblings) - - def _imprint_inits(self): - """Add initial positions and dimensions to the attributes""" - - for node in nuke.allNodes(): - refresh_node(node) - imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) - node.knob("x_init").setVisible(False) - node.knob("y_init").setVisible(False) - width = node.screenWidth() - height = node.screenHeight() - if "bdwidth" in node.knobs(): - imprint(node, {"w_init": width, "h_init": height}) - node.knob("w_init").setVisible(False) - node.knob("h_init").setVisible(False) - refresh_node(node) - - def _update_nodes( - self, placeholder, nodes, considered_nodes, offset_y=None - ): - """Adjust backdrop nodes dimensions and positions. - - Considering some nodes sizes. 
- - Args: - nodes (list): list of nodes to update - considered_nodes (list): list of nodes to consider while updating - positions and dimensions - offset (int): distance between copies - """ - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - - min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes) - - diff_x = diff_y = 0 - contained_nodes = [] # for backdrops - - if offset_y is None: - width_ph = placeholder_node.screenWidth() - height_ph = placeholder_node.screenHeight() - diff_y = max_y - min_y - height_ph - diff_x = max_x - min_x - width_ph - contained_nodes = [placeholder_node] - min_x = placeholder_node.xpos() - min_y = placeholder_node.ypos() - else: - siblings = get_nodes_by_names(placeholder.data["siblings"]) - minX, _, maxX, _ = get_extreme_positions(siblings) - diff_y = max_y - min_y + 20 - diff_x = abs(max_x - min_x - maxX + minX) - contained_nodes = considered_nodes - - if diff_y <= 0 and diff_x <= 0: - return - - for node in nodes: - refresh_node(node) - - if ( - node == placeholder_node - or node in considered_nodes - ): - continue - - if ( - not isinstance(node, nuke.BackdropNode) - or ( - isinstance(node, nuke.BackdropNode) - and not set(contained_nodes) <= set(node.getNodes()) - ) - ): - if offset_y is None and node.xpos() >= min_x: - node.setXpos(node.xpos() + diff_x) - - if node.ypos() >= min_y: - node.setYpos(node.ypos() + diff_y) - - else: - width = node.screenWidth() - height = node.screenHeight() - node.knob("bdwidth").setValue(width + diff_x) - node.knob("bdheight").setValue(height + diff_y) - - refresh_node(node) - - def _set_created_connections(self, placeholder): - """ - set inputs and outputs of created nodes""" - - placeholder_node = nuke.toNode(placeholder.scene_identifier) - input_node, output_node = get_group_io_nodes( - placeholder.data["last_created"] - ) - for node in placeholder_node.dependent(): - for idx in range(node.inputs()): - if node.input(idx) == placeholder_node and output_node: - node.setInput(idx, output_node) - - for node in placeholder_node.dependencies(): - for idx in range(placeholder_node.inputs()): - if placeholder_node.input(idx) == node and input_node: - input_node.setInput(0, node) - - def _create_sib_copies(self, placeholder): - """ creating copies of the palce_holder siblings (the ones who were - created with it) for the new nodes added - - Returns : - copies (dict) : with copied nodes names and their copies - """ - - copies = {} - siblings = get_nodes_by_names(placeholder.data["siblings"]) - for node in siblings: - new_node = duplicate_node(node) - - x_init = int(new_node.knob("x_init").getValue()) - y_init = int(new_node.knob("y_init").getValue()) - new_node.setXYpos(x_init, y_init) - if isinstance(new_node, nuke.BackdropNode): - w_init = new_node.knob("w_init").getValue() - h_init = new_node.knob("h_init").getValue() - new_node.knob("bdwidth").setValue(w_init) - new_node.knob("bdheight").setValue(h_init) - refresh_node(node) - - if "repre_id" in node.knobs().keys(): - node.removeKnob(node.knob("repre_id")) - copies[node.name()] = new_node - return copies - - def _set_copies_connections(self, placeholder, copies): - """Set inputs and outputs of the copies. - - Args: - copies (dict): Copied nodes by their names. 
- """ - - last_input, last_output = get_group_io_nodes( - placeholder.data["last_created"] - ) - siblings = get_nodes_by_names(placeholder.data["siblings"]) - siblings_input, siblings_output = get_group_io_nodes(siblings) - copy_input = copies[siblings_input.name()] - copy_output = copies[siblings_output.name()] - - for node_init in siblings: - if node_init == siblings_output: - continue - - node_copy = copies[node_init.name()] - for node in node_init.dependent(): - for idx in range(node.inputs()): - if node.input(idx) != node_init: - continue - - if node in siblings: - copies[node.name()].setInput(idx, node_copy) - else: - last_input.setInput(0, node_copy) - - for node in node_init.dependencies(): - for idx in range(node_init.inputs()): - if node_init.input(idx) != node: - continue - - if node_init == siblings_input: - copy_input.setInput(idx, node) - elif node in siblings: - node_copy.setInput(idx, copies[node.name()]) - else: - node_copy.setInput(idx, last_output) - - siblings_input.setInput(0, copy_output) - - def build_workfile_template(*args, **kwargs): builder = NukeTemplateBuilder(registered_host()) builder.build_template(*args, **kwargs) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py new file mode 100644 index 0000000000..a5490021e4 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/create_placeholder.py @@ -0,0 +1,428 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + CreatePlaceholderItem, + PlaceholderCreateMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderCreatePlugin( + NukePlaceholderPlugin, PlaceholderCreateMixin +): + identifier = "nuke.create" + label = "Nuke create" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderCreatePlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _before_instance_create(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + plugin_identifier_knob = node.knob("plugin_identifier") + if ( + plugin_identifier_knob is None + or plugin_identifier_knob.getValue() != self.identifier + ): + continue + + placeholder_data = self._parse_placeholder_node_data(node) + + output.append( + CreatePlaceholderItem(node_name, placeholder_data, self) + ) + + return output + + def populate_placeholder(self, placeholder): + self.populate_create_placeholder(placeholder) + + def 
repopulate_placeholder(self, placeholder):
+        self.populate_create_placeholder(placeholder)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_create_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after creation of its corresponding instances.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to create
+                instances.
+            failed (bool): Creation of the instances failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Created nodes: {}".format(nodes_created))
+        if not nodes_created:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_created = self._move_to_placeholder_group(
+            placeholder, nodes_created
+        )
+        placeholder.data["last_created"] = nodes_created
+        refresh_nodes(nodes_created)
+
+        # positioning of the created nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_created)
+        for node in nodes_created:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_created)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
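+
+        # Three layout cases follow:
+        #   1. first population (no children yet): push the surrounding
+        #      nodes apart and wire the created nodes into the
+        #      placeholder's graph,
+        #   2. repopulation with known siblings: duplicate the sibling
+        #      nodes and rewire the copies,
+        #   3. no siblings: place the created nodes in free space.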
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of created nodes
+
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
+
+            self._set_created_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the new created nodes,
+            # set their inputs and outputs and update all nodes positions and
+            # dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_created)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_created,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the created
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_created, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in nodes_created:
+                xpos = (node.xpos() - min_x) + xpointer
+                ypos = (node.ypos() - min_y) + ypointer
+                node.setXYpos(xpos, ypos)
+
+        placeholder.data["nb_children"] += 1
+        reset_selection()
+
+        # go back to root group
+        nuke.root().begin()
+
+    def _move_to_placeholder_group(self, placeholder, nodes_created):
+        """Open the placeholder's group and paste the created nodes into it.
+
+        Returns:
+            list: The new list of pasted nodes.
+        """
+        groups_name = placeholder.data["group_name"]
+        reset_selection()
+        select_nodes(nodes_created)
+        if groups_name:
+            with node_tempfile() as filepath:
+                nuke.nodeCopy(filepath)
+                for node in nuke.selectedNodes():
+                    nuke.delete(node)
+                group = nuke.toNode(groups_name)
+                group.begin()
+                nuke.nodePaste(filepath)
+                nodes_created = nuke.selectedNodes()
+        return nodes_created
+
+    def _fix_z_order(self, placeholder):
+        """Fix the problem of z_order when a backdrop is created."""
+
+        nodes_created = placeholder.data["last_created"]
+        created_backdrops = []
+        bd_orders = set()
+        for node in nodes_created:
+            if isinstance(node, nuke.BackdropNode):
+                created_backdrops.append(node)
+                bd_orders.add(node.knob("z_order").getValue())
+
+        if not bd_orders:
+            return
+
+        sib_orders = set()
+        for node_name in placeholder.data["siblings"]:
+            node = nuke.toNode(node_name)
+            if isinstance(node, nuke.BackdropNode):
+                sib_orders.add(node.knob("z_order").getValue())
+
+        if not sib_orders:
+            return
+
+        min_order = min(bd_orders)
+        max_order = max(sib_orders)
+        for backdrop_node in created_backdrops:
+            z_order = backdrop_node.knob("z_order").getValue()
+            backdrop_node.knob("z_order").setValue(
+                z_order + max_order - min_order + 1)
+
+    def _imprint_siblings(self, placeholder):
+        """
+        - add siblings names to placeholder attributes (nodes created with it)
+        - add Id to the attributes of all the other nodes
+        """
+
+        created_nodes = placeholder.data["last_created"]
+        created_nodes_set = set(created_nodes)
+
+        for node in created_nodes:
+            node_knobs = node.knobs()
+
+            if (
+                "is_placeholder" not in node_knobs
+                or (
+                    "is_placeholder" in node_knobs
+                    and node.knob("is_placeholder").value()
+                )
+            ):
+                siblings = list(created_nodes_set - {node})
+                siblings_name = get_names_from_nodes(siblings)
+                siblings = {"siblings": siblings_name}
+                imprint(node, siblings)
+
+    def _imprint_inits(self):
+        """Add initial positions and dimensions to the attributes"""
+
+        for node in nuke.allNodes():
+            refresh_node(node)
+            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
+            node.knob("x_init").setVisible(False)
+            node.knob("y_init").setVisible(False)
+            width = node.screenWidth()
+            height = node.screenHeight()
+            if "bdwidth" in node.knobs():
+                imprint(node, {"w_init": width, "h_init": height})
+                node.knob("w_init").setVisible(False)
+                node.knob("h_init").setVisible(False)
+            refresh_node(node)
+
+    def _update_nodes(
+        self, placeholder, nodes, considered_nodes, offset_y=None
+    ):
+        """Adjust backdrop nodes dimensions and positions.
+
+        Considering some nodes sizes.
+
+        Args:
+            placeholder (PlaceholderItem): The processed placeholder item.
+            nodes (list): list of nodes to update
+            considered_nodes (list): list of nodes to consider while updating
+                positions and dimensions
+            offset_y (int): distance between copies
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_created_connections(self, placeholder):
+        """Set inputs and outputs of created nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_created"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
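+
+    # The two helpers below serve the "siblings" repopulation case above:
+    # _create_sib_copies duplicates the nodes created with the placeholder
+    # and _set_copies_connections rewires those duplicates.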
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the nodes created
+        with it) for the newly added nodes.
+
+        Returns:
+            dict: Mapping of copied node names to their copies.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_created"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py new file mode 100644 index 0000000000..258f48c9d3 --- /dev/null +++ b/client/ayon_core/hosts/nuke/plugins/workfile_build/load_placeholder.py @@ -0,0 +1,455 @@ +import nuke + +from ayon_core.pipeline.workfile.workfile_template_builder import ( + LoadPlaceholderItem, + PlaceholderLoadMixin, +) +from ayon_core.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + get_extreme_positions, + get_group_io_nodes, + imprint, + refresh_node, + refresh_nodes, + reset_selection, + get_names_from_nodes, + get_nodes_by_names, + select_nodes, + duplicate_node, + node_tempfile, +) +from ayon_core.hosts.nuke.api.workfile_template_builder import ( + NukePlaceholderPlugin +) + + +class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): + identifier = "nuke.load" + label = "Nuke load" + + def _parse_placeholder_node_data(self, node): + placeholder_data = super( + NukePlaceholderLoadPlugin, self + )._parse_placeholder_node_data(node) + + node_knobs = node.knobs() + nb_children = 0 + if "nb_children" in node_knobs: + nb_children = int(node_knobs["nb_children"].getValue()) + placeholder_data["nb_children"] = nb_children + + siblings = [] + if "siblings" in node_knobs: + siblings = node_knobs["siblings"].values() + placeholder_data["siblings"] = siblings + + node_full_name = node.fullName() + placeholder_data["group_name"] = node_full_name.rpartition(".")[0] + placeholder_data["last_loaded"] = [] + placeholder_data["delete"] = False + return placeholder_data + + def _get_loaded_repre_ids(self): + loaded_representation_ids = self.builder.get_shared_populate_data( + "loaded_representation_ids" + ) + if loaded_representation_ids is None: + loaded_representation_ids = set() + for node in nuke.allNodes(): + if "repre_id" in node.knobs(): + loaded_representation_ids.add( + node.knob("repre_id").getValue() + ) + + self.builder.set_shared_populate_data( + "loaded_representation_ids", loaded_representation_ids + ) + return loaded_representation_ids + + def _before_placeholder_load(self, placeholder): + placeholder.data["nodes_init"] = nuke.allNodes() + + def _before_repre_load(self, placeholder, representation): + placeholder.data["last_repre_id"] = representation["id"] + + def collect_placeholders(self): + output = [] + scene_placeholders = self._collect_scene_placeholders() + for node_name, node in scene_placeholders.items(): + 
plugin_identifier_knob = node.knob("plugin_identifier")
+            if (
+                plugin_identifier_knob is None
+                or plugin_identifier_knob.getValue() != self.identifier
+            ):
+                continue
+
+            placeholder_data = self._parse_placeholder_node_data(node)
+            # TODO: do data validations and maybe upgrades if they are invalid
+            output.append(
+                LoadPlaceholderItem(node_name, placeholder_data, self)
+            )
+
+        return output
+
+    def populate_placeholder(self, placeholder):
+        self.populate_load_placeholder(placeholder)
+
+    def repopulate_placeholder(self, placeholder):
+        repre_ids = self._get_loaded_repre_ids()
+        self.populate_load_placeholder(placeholder, repre_ids)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def post_placeholder_process(self, placeholder, failed):
+        """Cleanup placeholder after load of its corresponding representations.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+        # deselect all selected nodes
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        # getting the latest nodes added
+        # TODO get from shared populate data!
+        nodes_init = placeholder.data["nodes_init"]
+        nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
+        self.log.debug("Loaded nodes: {}".format(nodes_loaded))
+        if not nodes_loaded:
+            return
+
+        placeholder.data["delete"] = True
+
+        nodes_loaded = self._move_to_placeholder_group(
+            placeholder, nodes_loaded
+        )
+        placeholder.data["last_loaded"] = nodes_loaded
+        refresh_nodes(nodes_loaded)
+
+        # positioning of the loaded nodes
+        min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
+        for node in nodes_loaded:
+            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
+            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
+            node.setXYpos(xpos, ypos)
+        refresh_nodes(nodes_loaded)
+
+        # fix the problem of z_order for backdrops
+        self._fix_z_order(placeholder)
+
+        if placeholder.data.get("keep_placeholder"):
+            self._imprint_siblings(placeholder)
+
+        if placeholder.data["nb_children"] == 0:
+            # save initial nodes positions and dimensions, update them
+            # and set inputs and outputs of loaded nodes
+            if placeholder.data.get("keep_placeholder"):
+                self._imprint_inits()
+                self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
+
+            self._set_loaded_connections(placeholder)
+
+        elif placeholder.data["siblings"]:
+            # create copies of placeholder siblings for the new loaded nodes,
+            # set their inputs and outputs and update all nodes positions and
+            # dimensions and siblings names
+
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            refresh_nodes(siblings)
+            copies = self._create_sib_copies(placeholder)
+            new_nodes = list(copies.values())  # copies nodes
+            self._update_nodes(placeholder, new_nodes, nodes_loaded)
+            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
+            new_nodes_name = get_names_from_nodes(new_nodes)
+            imprint(placeholder_node, {"siblings": new_nodes_name})
+            self._set_copies_connections(placeholder, copies)
+
+            self._update_nodes(
+                placeholder,
+                nuke.allNodes(),
+                new_nodes + nodes_loaded,
+                20
+            )
+
+            new_siblings = get_names_from_nodes(new_nodes)
+            placeholder.data["siblings"] = new_siblings
+
+        else:
+            # if the placeholder doesn't have siblings, the loaded
+            # nodes will be placed in a free space
+
+            xpointer, ypointer = find_free_space_to_paste_nodes(
+                nodes_loaded, direction="bottom", offset=200
+            )
+            node = nuke.createNode("NoOp")
+            reset_selection()
+            nuke.delete(node)
+            for node in 
nodes_loaded: + xpos = (node.xpos() - min_x) + xpointer + ypos = (node.ypos() - min_y) + ypointer + node.setXYpos(xpos, ypos) + + placeholder.data["nb_children"] += 1 + reset_selection() + + # go back to root group + nuke.root().begin() + + def _move_to_placeholder_group(self, placeholder, nodes_loaded): + """ + opening the placeholder's group and copying loaded nodes in it. + + Returns : + nodes_loaded (list): the new list of pasted nodes + """ + + groups_name = placeholder.data["group_name"] + reset_selection() + select_nodes(nodes_loaded) + if groups_name: + with node_tempfile() as filepath: + nuke.nodeCopy(filepath) + for node in nuke.selectedNodes(): + nuke.delete(node) + group = nuke.toNode(groups_name) + group.begin() + nuke.nodePaste(filepath) + nodes_loaded = nuke.selectedNodes() + return nodes_loaded + + def _fix_z_order(self, placeholder): + """Fix the problem of z_order when a backdrop is loaded.""" + + nodes_loaded = placeholder.data["last_loaded"] + loaded_backdrops = [] + bd_orders = set() + for node in nodes_loaded: + if isinstance(node, nuke.BackdropNode): + loaded_backdrops.append(node) + bd_orders.add(node.knob("z_order").getValue()) + + if not bd_orders: + return + + sib_orders = set() + for node_name in placeholder.data["siblings"]: + node = nuke.toNode(node_name) + if isinstance(node, nuke.BackdropNode): + sib_orders.add(node.knob("z_order").getValue()) + + if not sib_orders: + return + + min_order = min(bd_orders) + max_order = max(sib_orders) + for backdrop_node in loaded_backdrops: + z_order = backdrop_node.knob("z_order").getValue() + backdrop_node.knob("z_order").setValue( + z_order + max_order - min_order + 1) + + def _imprint_siblings(self, placeholder): + """ + - add siblings names to placeholder attributes (nodes loaded with it) + - add Id to the attributes of all the other nodes + """ + + loaded_nodes = placeholder.data["last_loaded"] + loaded_nodes_set = set(loaded_nodes) + data = {"repre_id": str(placeholder.data["last_repre_id"])} + + for node in loaded_nodes: + node_knobs = node.knobs() + if "builder_type" not in node_knobs: + # save the id of representation for all imported nodes + imprint(node, data) + node.knob("repre_id").setVisible(False) + refresh_node(node) + continue + + if ( + "is_placeholder" not in node_knobs + or ( + "is_placeholder" in node_knobs + and node.knob("is_placeholder").value() + ) + ): + siblings = list(loaded_nodes_set - {node}) + siblings_name = get_names_from_nodes(siblings) + siblings = {"siblings": siblings_name} + imprint(node, siblings) + + def _imprint_inits(self): + """Add initial positions and dimensions to the attributes""" + + for node in nuke.allNodes(): + refresh_node(node) + imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()}) + node.knob("x_init").setVisible(False) + node.knob("y_init").setVisible(False) + width = node.screenWidth() + height = node.screenHeight() + if "bdwidth" in node.knobs(): + imprint(node, {"w_init": width, "h_init": height}) + node.knob("w_init").setVisible(False) + node.knob("h_init").setVisible(False) + refresh_node(node) + + def _update_nodes( + self, placeholder, nodes, considered_nodes, offset_y=None + ): + """Adjust backdrop nodes dimensions and positions. + + Considering some nodes sizes. 
+
+        Args:
+            placeholder (PlaceholderItem): The processed placeholder item.
+            nodes (list): list of nodes to update
+            considered_nodes (list): list of nodes to consider while updating
+                positions and dimensions
+            offset_y (int): distance between copies
+        """
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+
+        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
+
+        diff_x = diff_y = 0
+        contained_nodes = []  # for backdrops
+
+        if offset_y is None:
+            width_ph = placeholder_node.screenWidth()
+            height_ph = placeholder_node.screenHeight()
+            diff_y = max_y - min_y - height_ph
+            diff_x = max_x - min_x - width_ph
+            contained_nodes = [placeholder_node]
+            min_x = placeholder_node.xpos()
+            min_y = placeholder_node.ypos()
+        else:
+            siblings = get_nodes_by_names(placeholder.data["siblings"])
+            minX, _, maxX, _ = get_extreme_positions(siblings)
+            diff_y = max_y - min_y + 20
+            diff_x = abs(max_x - min_x - maxX + minX)
+            contained_nodes = considered_nodes
+
+        if diff_y <= 0 and diff_x <= 0:
+            return
+
+        for node in nodes:
+            refresh_node(node)
+
+            if (
+                node == placeholder_node
+                or node in considered_nodes
+            ):
+                continue
+
+            if (
+                not isinstance(node, nuke.BackdropNode)
+                or (
+                    isinstance(node, nuke.BackdropNode)
+                    and not set(contained_nodes) <= set(node.getNodes())
+                )
+            ):
+                if offset_y is None and node.xpos() >= min_x:
+                    node.setXpos(node.xpos() + diff_x)
+
+                if node.ypos() >= min_y:
+                    node.setYpos(node.ypos() + diff_y)
+
+            else:
+                width = node.screenWidth()
+                height = node.screenHeight()
+                node.knob("bdwidth").setValue(width + diff_x)
+                node.knob("bdheight").setValue(height + diff_y)
+
+            refresh_node(node)
+
+    def _set_loaded_connections(self, placeholder):
+        """Set inputs and outputs of loaded nodes."""
+
+        placeholder_node = nuke.toNode(placeholder.scene_identifier)
+        input_node, output_node = get_group_io_nodes(
+            placeholder.data["last_loaded"]
+        )
+        for node in placeholder_node.dependent():
+            for idx in range(node.inputs()):
+                if node.input(idx) == placeholder_node and output_node:
+                    node.setInput(idx, output_node)
+
+        for node in placeholder_node.dependencies():
+            for idx in range(placeholder_node.inputs()):
+                if placeholder_node.input(idx) == node and input_node:
+                    input_node.setInput(0, node)
+
+    def _create_sib_copies(self, placeholder):
+        """Create copies of the placeholder siblings (the nodes loaded
+        with it) for the newly added nodes.
+
+        Returns:
+            dict: Mapping of copied node names to their copies.
+        """
+
+        copies = {}
+        siblings = get_nodes_by_names(placeholder.data["siblings"])
+        for node in siblings:
+            new_node = duplicate_node(node)
+
+            x_init = int(new_node.knob("x_init").getValue())
+            y_init = int(new_node.knob("y_init").getValue())
+            new_node.setXYpos(x_init, y_init)
+            if isinstance(new_node, nuke.BackdropNode):
+                w_init = new_node.knob("w_init").getValue()
+                h_init = new_node.knob("h_init").getValue()
+                new_node.knob("bdwidth").setValue(w_init)
+                new_node.knob("bdheight").setValue(h_init)
+                refresh_node(node)
+
+            if "repre_id" in node.knobs().keys():
+                node.removeKnob(node.knob("repre_id"))
+            copies[node.name()] = new_node
+        return copies
+
+    def _set_copies_connections(self, placeholder, copies):
+        """Set inputs and outputs of the copies.
+
+        Args:
+            copies (dict): Copied nodes by their names.
+ """ + + last_input, last_output = get_group_io_nodes( + placeholder.data["last_loaded"] + ) + siblings = get_nodes_by_names(placeholder.data["siblings"]) + siblings_input, siblings_output = get_group_io_nodes(siblings) + copy_input = copies[siblings_input.name()] + copy_output = copies[siblings_output.name()] + + for node_init in siblings: + if node_init == siblings_output: + continue + + node_copy = copies[node_init.name()] + for node in node_init.dependent(): + for idx in range(node.inputs()): + if node.input(idx) != node_init: + continue + + if node in siblings: + copies[node.name()].setInput(idx, node_copy) + else: + last_input.setInput(0, node_copy) + + for node in node_init.dependencies(): + for idx in range(node_init.inputs()): + if node_init.input(idx) != node: + continue + + if node_init == siblings_input: + copy_input.setInput(idx, node) + elif node in siblings: + node_copy.setInput(idx, copies[node.name()]) + else: + node_copy.setInput(idx, last_output) + + siblings_input.setInput(0, copy_output) diff --git a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py index 26f2469844..a44c3490c6 100644 --- a/client/ayon_core/hosts/photoshop/plugins/create/create_image.py +++ b/client/ayon_core/hosts/photoshop/plugins/create/create_image.py @@ -35,8 +35,12 @@ class ImageCreator(Creator): create_empty_group = False stub = api.stub() # only after PS is up - top_level_selected_items = stub.get_selected_layers() if pre_create_data.get("use_selection"): + try: + top_level_selected_items = stub.get_selected_layers() + except ValueError: + raise CreatorError("Cannot group locked Background layer!") + only_single_item_selected = len(top_level_selected_items) == 1 if ( only_single_item_selected or @@ -50,11 +54,12 @@ class ImageCreator(Creator): group = stub.group_selected_layers(product_name_from_ui) groups_to_create.append(group) else: - stub.select_layers(stub.get_layers()) try: + stub.select_layers(stub.get_layers()) group = stub.group_selected_layers(product_name_from_ui) - except: + except ValueError: raise CreatorError("Cannot group locked Background layer!") + groups_to_create.append(group) # create empty group if nothing selected diff --git a/client/ayon_core/hosts/substancepainter/api/lib.py b/client/ayon_core/hosts/substancepainter/api/lib.py index 1cb480b552..64c39943ce 100644 --- a/client/ayon_core/hosts/substancepainter/api/lib.py +++ b/client/ayon_core/hosts/substancepainter/api/lib.py @@ -586,7 +586,6 @@ def prompt_new_file_with_mesh(mesh_filepath): # TODO: find a way to improve the process event to # load more complicated mesh app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000) - file_dialog.done(file_dialog.Accepted) app.processEvents(QtCore.QEventLoop.AllEvents) @@ -606,7 +605,7 @@ def prompt_new_file_with_mesh(mesh_filepath): mesh_select.setVisible(False) # Ensure UI is visually up-to-date - app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000) # Trigger the 'select file' dialog to set the path and have the # new file dialog to use the path. 
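A note on the two-argument `processEvents` call above: Qt's `QCoreApplication.processEvents(flags, maxtime)` overload keeps pumping queued events for up to `maxtime` milliseconds instead of returning after a single pass. A minimal, self-contained sketch of the pattern; the timeout mirrors the change above, while the application handling is illustrative only:

```python
from qtpy import QtCore, QtWidgets

# Reuse the running application instance, or create one for this sketch.
app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

# Pump queued events for up to 8 seconds while ignoring user input,
# giving a slow dialog time to finish populating before we touch it.
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 8000)
```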
@@ -623,8 +622,6 @@ def prompt_new_file_with_mesh(mesh_filepath):
             "Failed to set mesh path with the prompt dialog:"
             f"{mesh_filepath}\n\n"
             "Creating new project directly with the mesh path instead.")
-    else:
-        dialog.done(dialog.Accepted)
 
     new_action = _get_new_project_action()
     if not new_action:
diff --git a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
index 01cb65dd5c..d5aac1191c 100644
--- a/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
+++ b/client/ayon_core/hosts/substancepainter/plugins/load/load_mesh.py
@@ -1,3 +1,5 @@
+import copy
+from qtpy import QtWidgets, QtCore
 from ayon_core.pipeline import (
     load,
     get_representation_path,
@@ -8,10 +10,133 @@ from ayon_core.hosts.substancepainter.api.pipeline import (
     set_container_metadata,
     remove_container_metadata
 )
-from ayon_core.hosts.substancepainter.api.lib import prompt_new_file_with_mesh
 
 import substance_painter.project
-import qargparse
+
+
+def _convert(substance_attr):
+    """Return a Substance Painter Python API project attribute from a string.
+
+    This converts a string such as "ProjectWorkflow.Default" into the
+    equivalent Substance Painter Python API object, e.g.
+    `substance_painter.project.ProjectWorkflow.Default`.
+
+    Args:
+        substance_attr (str): The `substance_painter.project` attribute,
+            for example "ProjectWorkflow.Default"
+
+    Returns:
+        Any: Substance Python API object of the project attribute.
+
+    Raises:
+        ValueError: If the attribute does not exist on the
+            `substance_painter.project` python api.
+    """
+    root = substance_painter.project
+    for attr in substance_attr.split("."):
+        root = getattr(root, attr, None)
+        if root is None:
+            raise ValueError(
+                "Substance Painter project attribute"
+                f" does not exist: {substance_attr}")
+
+    return root
+
+
+def get_template_by_name(name: str, templates: list[dict]) -> dict:
+    return next(
+        template for template in templates
+        if template["name"] == name
+    )
+
+
+class SubstanceProjectConfigurationWindow(QtWidgets.QDialog):
+    """Pop-up dialog that lets the user pick a project template and
+    configuration options when creating a project or reloading its mesh.
+ """ + def __init__(self, project_templates): + super(SubstanceProjectConfigurationWindow, self).__init__() + self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) + + self.configuration = None + self.template_names = [template["name"] for template + in project_templates] + self.project_templates = project_templates + + self.widgets = { + "label": QtWidgets.QLabel( + "Select your template for project configuration"), + "template_options": QtWidgets.QComboBox(), + "import_cameras": QtWidgets.QCheckBox("Import Cameras"), + "preserve_strokes": QtWidgets.QCheckBox("Preserve Strokes"), + "clickbox": QtWidgets.QWidget(), + "combobox": QtWidgets.QWidget(), + "buttons": QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok + | QtWidgets.QDialogButtonBox.Cancel) + } + + self.widgets["template_options"].addItems(self.template_names) + + template_name = self.widgets["template_options"].currentText() + self._update_to_match_template(template_name) + # Build clickboxes + layout = QtWidgets.QHBoxLayout(self.widgets["clickbox"]) + layout.addWidget(self.widgets["import_cameras"]) + layout.addWidget(self.widgets["preserve_strokes"]) + # Build combobox + layout = QtWidgets.QHBoxLayout(self.widgets["combobox"]) + layout.addWidget(self.widgets["template_options"]) + # Build buttons + layout = QtWidgets.QHBoxLayout(self.widgets["buttons"]) + # Build layout. + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(self.widgets["label"]) + layout.addWidget(self.widgets["combobox"]) + layout.addWidget(self.widgets["clickbox"]) + layout.addWidget(self.widgets["buttons"]) + + self.widgets["template_options"].currentTextChanged.connect( + self._update_to_match_template) + self.widgets["buttons"].accepted.connect(self.on_accept) + self.widgets["buttons"].rejected.connect(self.on_reject) + + def on_accept(self): + self.configuration = self.get_project_configuration() + self.close() + + def on_reject(self): + self.close() + + def _update_to_match_template(self, template_name): + template = get_template_by_name(template_name, self.project_templates) + self.widgets["import_cameras"].setChecked(template["import_cameras"]) + self.widgets["preserve_strokes"].setChecked( + template["preserve_strokes"]) + + def get_project_configuration(self): + templates = self.project_templates + template_name = self.widgets["template_options"].currentText() + template = get_template_by_name(template_name, templates) + template = copy.deepcopy(template) # do not edit the original + template["import_cameras"] = self.widgets["import_cameras"].isChecked() + template["preserve_strokes"] = ( + self.widgets["preserve_strokes"].isChecked() + ) + for key in ["normal_map_format", + "project_workflow", + "tangent_space_mode"]: + template[key] = _convert(template[key]) + return template + + @classmethod + def prompt(cls, templates): + dialog = cls(templates) + dialog.exec_() + configuration = dialog.configuration + dialog.deleteLater() + return configuration class SubstanceLoadProjectMesh(load.LoaderPlugin): @@ -25,48 +150,35 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin): icon = "code-fork" color = "orange" - options = [ - qargparse.Boolean( - "preserve_strokes", - default=True, - help="Preserve strokes positions on mesh.\n" - "(only relevant when loading into existing project)" - ), - qargparse.Boolean( - "import_cameras", - default=True, - help="Import cameras from the mesh file." 
-        )
-    ]
+
+    # Defined via settings
+    project_templates = []
 
-    def load(self, context, name, namespace, data):
+    def load(self, context, name, namespace, options=None):
 
         # Get user inputs
-        import_cameras = data.get("import_cameras", True)
-        preserve_strokes = data.get("preserve_strokes", True)
-        sp_settings = substance_painter.project.Settings(
-            import_cameras=import_cameras
-        )
+        result = SubstanceProjectConfigurationWindow.prompt(
+            self.project_templates)
+        if not result:
+            # user cancelled the dialog, abort the load action
+            return
 
         if not substance_painter.project.is_open():
             # Allow to 'initialize' a new project
             path = self.filepath_from_context(context)
-            # TODO: improve the prompt dialog function to not
-            # only works for simple polygon scene
-            result = prompt_new_file_with_mesh(mesh_filepath=path)
-            if not result:
-                self.log.info("User cancelled new project prompt."
-                              "Creating new project directly from"
-                              " Substance Painter API Instead.")
-                settings = substance_painter.project.create(
-                    mesh_file_path=path, settings=sp_settings
-                )
-
+            sp_settings = substance_painter.project.Settings(
+                import_cameras=result["import_cameras"],
+                normal_map_format=result["normal_map_format"],
+                project_workflow=result["project_workflow"],
+                tangent_space_mode=result["tangent_space_mode"],
+                default_texture_resolution=result["default_texture_resolution"]
+            )
+            settings = substance_painter.project.create(
+                mesh_file_path=path, settings=sp_settings
+            )
         else:
             # Reload the mesh
             settings = substance_painter.project.MeshReloadingSettings(
-                import_cameras=import_cameras,
-                preserve_strokes=preserve_strokes
-            )
+                import_cameras=result["import_cameras"],
+                preserve_strokes=result["preserve_strokes"])
 
         def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus):  # noqa
             if status == substance_painter.project.ReloadMeshStatus.SUCCESS:  # noqa
@@ -92,7 +204,7 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
         # from the user's original choice. We don't store 'preserve_strokes'
         # as we always preserve strokes on updates.
         container["options"] = {
-            "import_cameras": import_cameras,
+            "import_cameras": result["import_cameras"],
         }
 
         set_container_metadata(project_mesh_object_name, container)
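Each entry of `project_templates` above is filled from addon settings. Below is a hypothetical example of one entry; the key names are taken from the code, the values are purely illustrative, and the three enum strings are the ones `_convert` resolves into `substance_painter.project` attributes:

```python
# Illustrative settings entry only; not part of the patch.
template = {
    "name": "Default (DirectX)",
    "import_cameras": True,
    "preserve_strokes": True,
    "default_texture_resolution": 2048,
    # Resolved by _convert() into substance_painter.project attributes:
    "normal_map_format": "NormalMapFormat.DirectX",
    "project_workflow": "ProjectWorkflow.Default",
    "tangent_space_mode": "TangentSpace.PerFragment",
}
```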
diff --git a/client/ayon_core/hosts/traypublisher/addon.py b/client/ayon_core/hosts/traypublisher/addon.py
index 70bdfe9a64..3dd275f223 100644
--- a/client/ayon_core/hosts/traypublisher/addon.py
+++ b/client/ayon_core/hosts/traypublisher/addon.py
@@ -1,5 +1,6 @@
 import os
 
+from pathlib import Path
 from ayon_core.lib import get_ayon_launcher_args
 from ayon_core.lib.execute import run_detached_process
 from ayon_core.addon import (
@@ -57,3 +58,62 @@ def launch():
     from ayon_core.tools import traypublisher
 
     traypublisher.main()
+
+
+@cli_main.command()
+@click_wrap.option(
    "--filepath",
+    help="Full path to CSV file with data",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--project",
+    help="Project name in which the context will be used",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--folder-path",
+    help="Folder path in which the context will be used",
+    type=str,
+    required=True
+)
+@click_wrap.option(
+    "--task",
+    help="Task name under the folder path in which the context will be used",
+    type=str,
+    required=False
+)
+@click_wrap.option(
+    "--ignore-validators",
+    help="Option to ignore validators",
+    type=bool,
+    is_flag=True,
+    required=False
+)
+def ingestcsv(
+    filepath,
+    project,
+    folder_path,
+    task,
+    ignore_validators
+):
+    """Ingest CSV file into project.
+
+    This command ingests a CSV file into the given project context. The CSV
+    file must follow a specific format; see the documentation for details.
+    """
+    from .csv_publish import csvpublish
+
+    # use Path to check that the CSV file exists
+    if not Path(filepath).exists():
+        raise FileNotFoundError(f"File {filepath} does not exist.")
+
+    csvpublish(
+        filepath,
+        project,
+        folder_path,
+        task,
+        ignore_validators
+    )
diff --git a/client/ayon_core/hosts/traypublisher/csv_publish.py b/client/ayon_core/hosts/traypublisher/csv_publish.py
new file mode 100644
index 0000000000..2762172936
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/csv_publish.py
@@ -0,0 +1,85 @@
+import pyblish.api
+import pyblish.util
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.lib.attribute_definitions import FileDefItem
+from ayon_core.pipeline import install_host
+from ayon_core.pipeline.create import CreateContext
+
+from ayon_core.hosts.traypublisher.api import TrayPublisherHost
+
+
+def csvpublish(
+    filepath,
+    project_name,
+    folder_path,
+    task_name=None,
+    ignore_validators=False
+):
+    """Publish CSV file.
+
+    Args:
+        filepath (str): Path to CSV file.
+        project_name (str): Project name.
+        folder_path (str): Folder path.
+        task_name (Optional[str]): Task name.
+        ignore_validators (Optional[bool]): Option to ignore validators.
+    """
+
+    # initialization of host
+    host = TrayPublisherHost()
+    install_host(host)
+
+    # setting host context into project
+    host.set_project_name(project_name)
+
+    # form precreate data with field values
+    file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
+    precreate_data = {
+        "csv_filepath_data": file_field,
+    }
+
+    # create context initialization
+    create_context = CreateContext(host, headless=True)
+    folder_entity = get_folder_by_path(
+        project_name,
+        folder_path=folder_path,
+    )
+
+    if not folder_entity:
+        raise ValueError(
+            f"Folder path '{folder_path}' doesn't"
+            f" exist in project '{project_name}'."
+        )
+
+    task_entity = get_task_by_name(
+        project_name,
+        folder_entity["id"],
+        task_name,
+    )
+
+    if task_name and not task_entity:
+        raise ValueError(
+            f"Task name '{task_name}' doesn't"
+            f" exist in folder '{folder_path}'."
+        )
+
+    create_context.create(
+        "io.ayon.creators.traypublisher.csv_ingest",
+        "Main",
+        folder_entity=folder_entity,
+        task_entity=task_entity,
+        pre_create_data=precreate_data,
+    )
+
+    # publishing context initialization
+    pyblish_context = pyblish.api.Context()
+    pyblish_context.data["create_context"] = create_context
+
+    # redefine targets (skip 'local' to disable validators)
+    targets = None
+    if ignore_validators:
+        targets = ["default", "ingest"]
+
+    # publishing
+    pyblish.util.publish(context=pyblish_context, targets=targets)
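For reference, the `csvpublish` entry point above can be driven through the `ingestcsv` CLI command or called directly from Python. A minimal usage sketch; the project, folder, task, and file names below are placeholders, not values from the patch:

```python
from ayon_core.hosts.traypublisher.csv_publish import csvpublish

# Hypothetical ingest of one CSV batch; validators stay enabled.
csvpublish(
    "/shared/ingest/batch01.csv",       # placeholder path to the CSV file
    project_name="demoProject",         # placeholder project name
    folder_path="/shots/sq010/sh0010",  # placeholder folder path
    task_name="compositing",            # optional task under the folder
    ignore_validators=False,
)
```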
diff --git a/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
new file mode 100644
index 0000000000..8143e8b45b
--- /dev/null
+++ b/client/ayon_core/hosts/traypublisher/plugins/create/create_csv_ingest.py
@@ -0,0 +1,744 @@
+import os
+import re
+import csv
+import clique
+from io import StringIO
+from copy import deepcopy, copy
+
+from ayon_api import get_folder_by_path, get_task_by_name
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline import CreatedInstance
+from ayon_core.lib import FileDef, BoolDef
+from ayon_core.lib.transcoding import (
+    VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
+)
+from ayon_core.pipeline.create import CreatorError
+from ayon_core.hosts.traypublisher.api.plugin import (
+    TrayPublishCreator
+)
+
+
+class IngestCSV(TrayPublishCreator):
+    """CSV ingest creator class"""
+
+    icon = "fa.file"
+
+    label = "CSV Ingest"
+    product_type = "csv_ingest_file"
+    identifier = "io.ayon.creators.traypublisher.csv_ingest"
+
+    default_variants = ["Main"]
+
+    description = "Ingest products' data from CSV file"
+    detailed_description = """
+Ingest products' data from CSV file following column and representation
+configuration in project settings.
+"""
+
+    # Position in the list of creators.
+    order = 10
+
+    # settings for this creator
+    columns_config = {}
+    representations_config = {}
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        """Create a product from each row found in the CSV.
+
+        Args:
+            subset_name (str): The subset name.
+            instance_data (dict): The instance data.
+            pre_create_data (dict): Pre-create attribute values, including
+                the CSV file selection.
+        """
+
+        csv_filepath_data = pre_create_data.get("csv_filepath_data", {})
+
+        folder = csv_filepath_data.get("directory", "")
+        if not os.path.exists(folder):
+            raise CreatorError(
+                f"Directory '{folder}' does not exist."
+            )
+        filenames = csv_filepath_data.get("filenames", [])
+        if not filenames:
+            raise CreatorError("No CSV file was selected.")
+        self._process_csv_file(
+            subset_name, instance_data, folder, filenames[0])
+
+    def _process_csv_file(
+        self, subset_name, instance_data, staging_dir, filename):
+        """Process CSV file.
+
+        Args:
+            subset_name (str): The subset name.
+            instance_data (dict): The instance data.
+            staging_dir (str): The staging directory.
+            filename (str): The filename.
+ """ + + # create new instance from the csv file via self function + self._pass_data_to_csv_instance( + instance_data, + staging_dir, + filename + ) + + csv_instance = CreatedInstance( + self.product_type, subset_name, instance_data, self + ) + self._store_new_instance(csv_instance) + + csv_instance["csvFileData"] = { + "filename": filename, + "staging_dir": staging_dir, + } + + # from special function get all data from csv file and convert them + # to new instances + csv_data_for_instances = self._get_data_from_csv( + staging_dir, filename) + + # create instances from csv data via self function + self._create_instances_from_csv_data( + csv_data_for_instances, staging_dir + ) + + def _create_instances_from_csv_data( + self, + csv_data_for_instances, + staging_dir + ): + """Create instances from csv data""" + + for folder_path, prepared_data in csv_data_for_instances.items(): + project_name = self.create_context.get_current_project_name() + products = prepared_data["products"] + + for instance_name, product_data in products.items(): + # get important instance variables + task_name = product_data["task_name"] + task_type = product_data["task_type"] + variant = product_data["variant"] + product_type = product_data["product_type"] + version = product_data["version"] + + # create subset/product name + product_name = get_product_name( + project_name, + task_name, + task_type, + self.host_name, + product_type, + variant + ) + + # make sure frame start/end is inherited from csv columns + # expected frame range data are handles excluded + for _, repre_data in product_data["representations"].items(): # noqa: E501 + frame_start = repre_data["frameStart"] + frame_end = repre_data["frameEnd"] + handle_start = repre_data["handleStart"] + handle_end = repre_data["handleEnd"] + fps = repre_data["fps"] + break + + # try to find any version comment in representation data + version_comment = next( + iter( + repre_data["comment"] + for repre_data in product_data["representations"].values() # noqa: E501 + if repre_data["comment"] + ), + None + ) + + # try to find any slate switch in representation data + slate_exists = any( + repre_data["slate"] + for _, repre_data in product_data["representations"].items() # noqa: E501 + ) + + # get representations from product data + representations = product_data["representations"] + label = f"{folder_path}_{product_name}_v{version:>03}" + + families = ["csv_ingest"] + if slate_exists: + # adding slate to families mainly for loaders to be able + # to filter out slates + families.append("slate") + + # make product data + product_data = { + "name": instance_name, + "folderPath": folder_path, + "families": families, + "label": label, + "task": task_name, + "variant": variant, + "source": "csv", + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "fps": fps, + "version": version, + "comment": version_comment, + } + + # create new instance + new_instance = CreatedInstance( + product_type, product_name, product_data, self + ) + self._store_new_instance(new_instance) + + if not new_instance.get("prepared_data_for_repres"): + new_instance["prepared_data_for_repres"] = [] + + base_thumbnail_repre_data = { + "name": "thumbnail", + "ext": None, + "files": None, + "stagingDir": None, + "stagingDir_persistent": True, + "tags": ["thumbnail", "delete"], + } + # need to populate all thumbnails for all representations + # so we can check if unique thumbnail per representation + # is needed + thumbnails = [ + 
repre_data["thumbnailPath"] + for repre_data in representations.values() + if repre_data["thumbnailPath"] + ] + multiple_thumbnails = len(set(thumbnails)) > 1 + explicit_output_name = None + thumbnails_processed = False + for filepath, repre_data in representations.items(): + # check if any review derivate tag is present + reviewable = any( + tag for tag in repre_data.get("tags", []) + # tag can be `ftrackreview` or `review` + if "review" in tag + ) + # since we need to populate multiple thumbnails as + # representation with outputName for (Ftrack instance + # integrator) pairing with reviewable video representations + if ( + thumbnails + and multiple_thumbnails + and reviewable + ): + # multiple unique thumbnails per representation needs + # grouping by outputName + # mainly used in Ftrack instance integrator + explicit_output_name = repre_data["representationName"] + relative_thumbnail_path = repre_data["thumbnailPath"] + # representation might not have thumbnail path + # so ignore this one + if not relative_thumbnail_path: + continue + thumb_dir, thumb_file = \ + self._get_refactor_thumbnail_path( + staging_dir, relative_thumbnail_path) + filename, ext = os.path.splitext(thumb_file) + thumbnail_repr_data = deepcopy( + base_thumbnail_repre_data) + thumbnail_repr_data.update({ + "name": "thumbnail_{}".format(filename), + "ext": ext[1:], + "files": thumb_file, + "stagingDir": thumb_dir, + "outputName": explicit_output_name, + }) + new_instance["prepared_data_for_repres"].append({ + "type": "thumbnail", + "colorspace": None, + "representation": thumbnail_repr_data, + }) + # also add thumbnailPath for ayon to integrate + if not new_instance.get("thumbnailPath"): + new_instance["thumbnailPath"] = ( + os.path.join(thumb_dir, thumb_file) + ) + elif ( + thumbnails + and not multiple_thumbnails + and not thumbnails_processed + or not reviewable + ): + """ + For case where we have only one thumbnail + and not reviewable medias. This needs to be processed + only once per instance. 
+ """ + if not thumbnails: + continue + # here we will use only one thumbnail for + # all representations + relative_thumbnail_path = repre_data["thumbnailPath"] + # popping last thumbnail from list since it is only one + # and we do not need to iterate again over it + if not relative_thumbnail_path: + relative_thumbnail_path = thumbnails.pop() + thumb_dir, thumb_file = \ + self._get_refactor_thumbnail_path( + staging_dir, relative_thumbnail_path) + _, ext = os.path.splitext(thumb_file) + thumbnail_repr_data = deepcopy( + base_thumbnail_repre_data) + thumbnail_repr_data.update({ + "ext": ext[1:], + "files": thumb_file, + "stagingDir": thumb_dir + }) + new_instance["prepared_data_for_repres"].append({ + "type": "thumbnail", + "colorspace": None, + "representation": thumbnail_repr_data, + }) + # also add thumbnailPath for ayon to integrate + if not new_instance.get("thumbnailPath"): + new_instance["thumbnailPath"] = ( + os.path.join(thumb_dir, thumb_file) + ) + + thumbnails_processed = True + + # get representation data + representation_data = self._get_representation_data( + filepath, repre_data, staging_dir, + explicit_output_name + ) + + new_instance["prepared_data_for_repres"].append({ + "type": "media", + "colorspace": repre_data["colorspace"], + "representation": representation_data, + }) + + def _get_refactor_thumbnail_path( + self, staging_dir, relative_thumbnail_path): + thumbnail_abs_path = os.path.join( + staging_dir, relative_thumbnail_path) + return os.path.split( + thumbnail_abs_path) + + def _get_representation_data( + self, filepath, repre_data, staging_dir, explicit_output_name=None + ): + """Get representation data + + Args: + filepath (str): Filepath to representation file. + repre_data (dict): Representation data from CSV file. + staging_dir (str): Staging directory. + explicit_output_name (Optional[str]): Explicit output name. + For grouping purposes with reviewable components. + Defaults to None. + """ + + # get extension of file + basename = os.path.basename(filepath) + extension = os.path.splitext(filepath)[-1].lower() + + # validate filepath is having correct extension based on output + repre_name = repre_data["representationName"] + repre_config_data = None + for repre in self.representations_config["representations"]: + if repre["name"] == repre_name: + repre_config_data = repre + break + + if not repre_config_data: + raise CreatorError( + f"Representation '{repre_name}' not found " + "in config representation data." + ) + + validate_extensions = repre_config_data["extensions"] + if extension not in validate_extensions: + raise CreatorError( + f"File extension '{extension}' not valid for " + f"output '{validate_extensions}'." + ) + + is_sequence = (extension in IMAGE_EXTENSIONS) + # convert ### string in file name to %03d + # this is for correct frame range validation + # example: file.###.exr -> file.%03d.exr + if "#" in basename: + padding = len(basename.split("#")) - 1 + basename = basename.replace("#" * padding, f"%0{padding}d") + is_sequence = True + + # make absolute path to file + absfilepath = os.path.normpath(os.path.join(staging_dir, filepath)) + dirname = os.path.dirname(absfilepath) + + # check if dirname exists + if not os.path.isdir(dirname): + raise CreatorError( + f"Directory '{dirname}' does not exist." 
+            )
+
+        # collect all data from dirname
+        paths_for_collection = []
+        for file in os.listdir(dirname):
+            filepath = os.path.join(dirname, file)
+            paths_for_collection.append(filepath)
+
+        collections, _ = clique.assemble(paths_for_collection)
+
+        if collections:
+            collections = collections[0]
+        else:
+            if is_sequence:
+                raise CreatorError(
+                    f"No collections found in directory '{dirname}'."
+                )
+
+        frame_start = None
+        frame_end = None
+        if is_sequence:
+            files = [os.path.basename(file) for file in collections]
+            frame_start = list(collections.indexes)[0]
+            frame_end = list(collections.indexes)[-1]
+        else:
+            files = basename
+
+        tags = deepcopy(repre_data["tags"])
+        # if slate is enabled in repre_data, tag the representation so the
+        # slate frame can be handled downstream
+        if repre_data["slate"]:
+            tags.append("has_slate")
+
+        # get representation data
+        representation_data = {
+            "name": repre_name,
+            "ext": extension[1:],
+            "files": files,
+            "stagingDir": dirname,
+            "stagingDir_persistent": True,
+            "tags": tags,
+        }
+        if extension in VIDEO_EXTENSIONS:
+            representation_data.update({
+                "fps": repre_data["fps"],
+                "outputName": repre_name,
+            })
+
+        if explicit_output_name:
+            representation_data["outputName"] = explicit_output_name
+
+        if frame_start is not None:
+            representation_data["frameStart"] = frame_start
+        if frame_end is not None:
+            representation_data["frameEnd"] = frame_end
+
+        return representation_data
+
+    def _get_data_from_csv(
+        self, package_dir, filename
+    ):
+        """Generate instances from the csv file"""
+        # get current project name and code from context.data
+        project_name = self.create_context.get_current_project_name()
+
+        csv_file_path = os.path.join(
+            package_dir, filename
+        )
+
+        # make sure csv file contains columns from following list
+        required_columns = [
+            column["name"] for column in self.columns_config["columns"]
+            if column["required_column"]
+        ]
+
+        # read csv file
+        with open(csv_file_path, "r") as csv_file:
+            csv_content = csv_file.read()
+
+        # read csv file with DictReader
+        csv_reader = csv.DictReader(
+            StringIO(csv_content),
+            delimiter=self.columns_config["csv_delimiter"]
+        )
+
+        # fix fieldnames
+        # sometimes someone can keep extra space at the start or end of
+        # the column name
+        all_columns = [
+            " ".join(column.rsplit()) for column in csv_reader.fieldnames]
+
+        # assign the fixed fieldnames back
+        csv_reader.fieldnames = all_columns
+
+        # check if csv file contains all required columns
+        if any(column not in all_columns for column in required_columns):
+            raise CreatorError(
+                f"Missing required columns: {required_columns}"
+            )
+
+        csv_data = {}
+        # get data from csv file
+        for row in csv_reader:
+            # Get required columns first
+            # TODO: will need to be folder path in CSV
+            # TODO: `context_asset_name` is now `folder_path`
+            folder_path = self._get_row_value_with_validation(
+                "Folder Path", row)
+            task_name = self._get_row_value_with_validation(
+                "Task Name", row)
+            version = self._get_row_value_with_validation(
+                "Version", row)
+
+            # Get optional columns
+            variant = self._get_row_value_with_validation(
+                "Variant", row)
+            product_type = self._get_row_value_with_validation(
+                "Product Type", row)
+
+            pre_product_name = (
+                f"{task_name}{variant}{product_type}"
+                f"{version}".replace(" ", "").lower()
+            )
+
+            # get representation data
+            filename, representation_data = \
+                self._get_representation_row_data(row)
+
+            # TODO: batch query of all folder paths and task names
+
+            # get folder entity from folder path
+            folder_entity = get_folder_by_path(
+                project_name, folder_path)
+
+            # make sure the folder exists
+            if not folder_entity:
+                raise CreatorError(
+                    f"Folder '{folder_path}' not found."
+                )
+
+            # get all tasks on the folder entity and find the requested one
+            task_entity = get_task_by_name(
+                project_name, folder_entity["id"], task_name)
+
+            # check if the task name is a valid task on the folder
+            if not task_entity:
+                raise CreatorError(
+                    f"Task '{task_name}' not found on folder "
+                    f"'{folder_path}'."
+                )
+
+            # Merge all csv rows into one dict without duplicates.
+            # Rows are already validated and sorted under the correct
+            # existing folder, and representations are distributed
+            # under products following variants.
+            if folder_path not in csv_data:
+                csv_data[folder_path] = {
+                    "folder_entity": folder_entity,
+                    "products": {
+                        pre_product_name: {
+                            "task_name": task_name,
+                            "task_type": task_entity["taskType"],
+                            "variant": variant,
+                            "product_type": product_type,
+                            "version": version,
+                            "representations": {
+                                filename: representation_data,
+                            },
+                        }
+                    }
+                }
+            else:
+                csv_products = csv_data[folder_path]["products"]
+                if pre_product_name not in csv_products:
+                    csv_products[pre_product_name] = {
+                        "task_name": task_name,
+                        "task_type": task_entity["taskType"],
+                        "variant": variant,
+                        "product_type": product_type,
+                        "version": version,
+                        "representations": {
+                            filename: representation_data,
+                        },
+                    }
+                else:
+                    csv_representations = \
+                        csv_products[pre_product_name]["representations"]
+                    if filename in csv_representations:
+                        raise CreatorError(
+                            f"Duplicate filename '{filename}' in csv file."
+                        )
+                    csv_representations[filename] = representation_data
+
+        return csv_data
+
+    def _get_representation_row_data(self, row_data):
+        """Get representation row data"""
+        # Get required columns first
+        file_path = self._get_row_value_with_validation(
+            "File Path", row_data)
+        frame_start = self._get_row_value_with_validation(
+            "Frame Start", row_data)
+        frame_end = self._get_row_value_with_validation(
+            "Frame End", row_data)
+        handle_start = self._get_row_value_with_validation(
+            "Handle Start", row_data)
+        handle_end = self._get_row_value_with_validation(
+            "Handle End", row_data)
+        fps = self._get_row_value_with_validation(
+            "FPS", row_data)
+
+        # Get optional columns
+        thumbnail_path = self._get_row_value_with_validation(
+            "Version Thumbnail", row_data)
+        colorspace = self._get_row_value_with_validation(
+            "Representation Colorspace", row_data)
+        comment = self._get_row_value_with_validation(
+            "Version Comment", row_data)
+        repre = self._get_row_value_with_validation(
+            "Representation", row_data)
+        slate_exists = self._get_row_value_with_validation(
+            "Slate Exists", row_data)
+        repre_tags = self._get_row_value_with_validation(
+            "Representation Tags", row_data)
+
+        # convert tags value to list
+        tags_list = copy(self.representations_config["default_tags"])
+        if repre_tags:
+            tags_list = []
+            tags_delimiter = self.representations_config["tags_delimiter"]
+            if tags_delimiter in repre_tags:
+                tags = repre_tags.split(tags_delimiter)
+                # strip spaces from tags and lowercase them
+                for _tag in tags:
+                    tags_list.append(_tag.strip().lower())
+            else:
+                tags_list.append(repre_tags)
+
+        representation_data = {
+            "colorspace": colorspace,
+            "comment": comment,
+            "representationName": repre,
+            "slate": slate_exists,
+            "tags": tags_list,
+            "thumbnailPath": thumbnail_path,
+            "frameStart": int(frame_start),
+            "frameEnd": int(frame_end),
+            "handleStart": int(handle_start),
+            "handleEnd": int(handle_end),
+            "fps": float(fps),
+        }
+        return file_path, representation_data
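+    # Illustrative sketch only (actual column names come from the project's
+    # columns_config): a row such as
+    #     Folder Path=/shots/sh010, Task Name=comp, Version=2,
+    #     File Path=sh010_comp.####.exr, Frame Start=1001, Frame End=1100
+    # would be returned by '_get_representation_row_data' as
+    #     ("sh010_comp.####.exr",
+    #      {"representationName": ..., "frameStart": 1001,
+    #       "frameEnd": 1100, ...})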
+ + def _get_row_value_with_validation( + self, column_name, row_data, default_value=None + ): + """Get row value with validation""" + + # get column data from column config + column_data = None + for column in self.columns_config["columns"]: + if column["name"] == column_name: + column_data = column + break + + if not column_data: + raise CreatorError( + f"Column '{column_name}' not found in column config." + ) + + # get column value from row + column_value = row_data.get(column_name) + column_required = column_data["required_column"] + + # check if column value is not empty string and column is required + if column_value == "" and column_required: + raise CreatorError( + f"Value in column '{column_name}' is required." + ) + + # get column type + column_type = column_data["type"] + # get column validation regex + column_validation = column_data["validation_pattern"] + # get column default value + column_default = default_value or column_data["default"] + + if column_type in ["number", "decimal"] and column_default == 0: + column_default = None + + # check if column value is not empty string + if column_value == "": + # set default value if column value is empty string + column_value = column_default + + # set column value to correct type following column type + if column_type == "number" and column_value is not None: + column_value = int(column_value) + elif column_type == "decimal" and column_value is not None: + column_value = float(column_value) + elif column_type == "bool": + column_value = column_value in ["true", "True"] + + # check if column value matches validation regex + if ( + column_value is not None and + not re.match(str(column_validation), str(column_value)) + ): + raise CreatorError( + f"Column '{column_name}' value '{column_value}' " + f"does not match validation regex '{column_validation}' \n" + f"Row data: {row_data} \n" + f"Column data: {column_data}" + ) + + return column_value + + def _pass_data_to_csv_instance( + self, instance_data, staging_dir, filename + ): + """Pass CSV representation file to instance data""" + + representation = { + "name": "csv", + "ext": "csv", + "files": filename, + "stagingDir": staging_dir, + "stagingDir_persistent": True, + } + + instance_data.update({ + "label": f"CSV: {filename}", + "representations": [representation], + "stagingDir": staging_dir, + "stagingDir_persistent": True, + }) + + def get_instance_attr_defs(self): + return [ + BoolDef( + "add_review_family", + default=True, + label="Review" + ) + ] + + def get_pre_create_attr_defs(self): + """Creating pre-create attributes at creator plugin. + + Returns: + list: list of attribute object instances + """ + # Use same attributes as for instance attributes + attr_defs = [ + FileDef( + "csv_filepath_data", + folders=False, + extensions=[".csv"], + allow_sequences=False, + single_item=True, + label="CSV File", + ), + ] + return attr_defs diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py new file mode 100644 index 0000000000..33536d0854 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/collect_csv_ingest_instance_data.py @@ -0,0 +1,47 @@ +from pprint import pformat +import pyblish.api +from ayon_core.pipeline import publish + + +class CollectCSVIngestInstancesData( + pyblish.api.InstancePlugin, + publish.AYONPyblishPluginMixin, + publish.ColormanagedPyblishPluginMixin +): + """Collect CSV Ingest data from instance. 
+ """ + + label = "Collect CSV Ingest instances data" + order = pyblish.api.CollectorOrder + 0.1 + hosts = ["traypublisher"] + families = ["csv_ingest"] + + def process(self, instance): + + # expecting [(colorspace, repre_data), ...] + prepared_repres_data_items = instance.data[ + "prepared_data_for_repres"] + + for prep_repre_data in prepared_repres_data_items: + type = prep_repre_data["type"] + colorspace = prep_repre_data["colorspace"] + repre_data = prep_repre_data["representation"] + + # thumbnails should be skipped + if type == "media": + # colorspace name is passed from CSV column + self.set_representation_colorspace( + repre_data, instance.context, colorspace + ) + elif type == "media" and colorspace is None: + # TODO: implement colorspace file rules file parsing + self.log.warning( + "Colorspace is not defined in csv for following" + f" representation: {pformat(repre_data)}" + ) + pass + elif type == "thumbnail": + # thumbnails should be skipped + pass + + instance.data["representations"].append(repre_data) diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py new file mode 100644 index 0000000000..4bdf7c0493 --- /dev/null +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/extract_csv_file.py @@ -0,0 +1,31 @@ +import pyblish.api + +from ayon_core.pipeline import publish + + +class ExtractCSVFile(publish.Extractor): + """ + Extractor export CSV file + """ + + label = "Extract CSV file" + order = pyblish.api.ExtractorOrder - 0.45 + families = ["csv_ingest_file"] + hosts = ["traypublisher"] + + def process(self, instance): + + csv_file_data = instance.data["csvFileData"] + + representation_csv = { + 'name': "csv_data", + 'ext': "csv", + 'files': csv_file_data["filename"], + "stagingDir": csv_file_data["staging_dir"], + "stagingDir_persistent": True + } + + instance.data["representations"].append(representation_csv) + + self.log.info("Added CSV file representation: {}".format( + representation_csv)) diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py index 3a62536507..0b4f8e16c1 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_existing_version.py @@ -16,6 +16,7 @@ class ValidateExistingVersion( order = ValidateContentsOrder hosts = ["traypublisher"] + targets = ["local"] actions = [RepairAction] diff --git a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py index 4f11571efe..13f13b05bb 100644 --- a/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py +++ b/client/ayon_core/hosts/traypublisher/plugins/publish/validate_frame_ranges.py @@ -16,6 +16,8 @@ class ValidateFrameRange(OptionalPyblishPluginMixin, label = "Validate Frame Range" hosts = ["traypublisher"] families = ["render", "plate"] + targets = ["local"] + order = ValidateContentsOrder optional = True diff --git a/client/ayon_core/hosts/unreal/ue_workers.py b/client/ayon_core/hosts/unreal/ue_workers.py index e3f8729c2e..256c0557be 100644 --- a/client/ayon_core/hosts/unreal/ue_workers.py +++ b/client/ayon_core/hosts/unreal/ue_workers.py @@ -260,11 +260,11 @@ class UEProjectGenerationWorker(UEWorker): self.failed.emit(msg, return_code) raise 
-            # ensure we have PySide2 installed in engine
+            # ensure we have PySide2/6 installed in engine
             self.progress.emit(0)
             self.stage_begin.emit(
-                (f"Checking PySide2 installation... {stage_count} "
+                (f"Checking Qt bindings installation... {stage_count} "
                  f" out of {stage_count}"))
             python_path = None
             if platform.system().lower() == "windows":
@@ -287,11 +287,30 @@ class UEProjectGenerationWorker(UEWorker):
                 msg = f"Unreal Python not found at {python_path}"
                 self.failed.emit(msg, 1)
                 raise RuntimeError(msg)
-            pyside_cmd = [python_path.as_posix(),
-                          "-m",
-                          "pip",
-                          "install",
-                          "pyside2"]
+
+            pyside_version = "PySide2"
+            ue_version = self.ue_version.split(".")
+            if int(ue_version[0]) == 5 and int(ue_version[1]) >= 4:
+                # Use PySide6 6.6.3 because 6.7.0 had a bug
+                # - 'QPushButton' can't be added to 'QBoxLayout'
+                pyside_version = "PySide6==6.6.3"
+
+            site_packages_prefix = python_path.parent.as_posix()
+
+            pyside_cmd = [
+                python_path.as_posix(),
+                "-m", "pip",
+                "install",
+                "--ignore-installed",
+                pyside_version,
+            ]
+
+            if platform.system().lower() == "windows":
+                pyside_cmd += ["--target", site_packages_prefix]
+
+            print(f"--- Installing {pyside_version} ...")
+            print(" ".join(pyside_cmd))

             pyside_install = subprocess.Popen(pyside_cmd,
                                               stdout=subprocess.PIPE,
@@ -306,8 +325,8 @@ class UEProjectGenerationWorker(UEWorker):

             return_code = pyside_install.wait()
             if return_code and return_code != 0:
-                msg = ("Failed to create the project! "
-                       "The installation of PySide2 has failed!")
+                msg = (f"Failed to create the project! The installation "
+                       f"of {pyside_version} failed with return code "
+                       f"{return_code}.")
                 self.failed.emit(msg, return_code)
                 raise RuntimeError(msg)
diff --git a/client/ayon_core/lib/__init__.py b/client/ayon_core/lib/__init__.py
index 408262ca42..e25d3479ee 100644
--- a/client/ayon_core/lib/__init__.py
+++ b/client/ayon_core/lib/__init__.py
@@ -27,6 +27,10 @@ from .local_settings import (
     get_openpype_username,
 )
 from .ayon_connection import initialize_ayon_connection
+from .cache import (
+    CacheItem,
+    NestedCacheItem,
+)
 from .events import (
     emit_event,
     register_event_callback
@@ -135,6 +139,7 @@ from .path_tools import (
 )

 from .ayon_info import (
+    is_in_ayon_launcher_process,
     is_running_from_build,
     is_using_ayon_console,
     is_staging_enabled,
@@ -157,6 +162,9 @@ __all__ = [

     "initialize_ayon_connection",

+    "CacheItem",
+    "NestedCacheItem",
+
     "emit_event",
     "register_event_callback",

@@ -241,6 +249,7 @@ __all__ = [

     "Logger",

+    "is_in_ayon_launcher_process",
     "is_running_from_build",
     "is_using_ayon_console",
     "is_staging_enabled",
diff --git a/client/ayon_core/lib/ayon_info.py b/client/ayon_core/lib/ayon_info.py
index fc09a7c90c..c4333fab95 100644
--- a/client/ayon_core/lib/ayon_info.py
+++ b/client/ayon_core/lib/ayon_info.py
@@ -1,4 +1,5 @@
 import os
+import sys
 import json
 import datetime
 import platform
@@ -25,6 +26,18 @@ def get_ayon_launcher_version():
         return content["__version__"]


+def is_in_ayon_launcher_process():
+    """Determine if current process is running from AYON launcher.
+
+    Returns:
+        bool: True if running from AYON launcher.
+
+    """
+    ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
+    executable_path = os.path.normpath(sys.executable)
+    return ayon_executable_path == executable_path
+
+
 def is_running_from_build():
     """Determine if current process is running from build or code.
diff --git a/client/ayon_core/lib/cache.py b/client/ayon_core/lib/cache.py new file mode 100644 index 0000000000..dc83520f76 --- /dev/null +++ b/client/ayon_core/lib/cache.py @@ -0,0 +1,250 @@ +import time +import collections + +InitInfo = collections.namedtuple( + "InitInfo", + ["default_factory", "lifetime"] +) + + +def _default_factory_func(): + return None + + +class CacheItem: + """Simple cache item with lifetime and default factory for default value. + + Default factory should return default value that is used on init + and on reset. + + Args: + default_factory (Optional[callable]): Function that returns default + value used on init and on reset. + lifetime (Optional[int]): Lifetime of the cache data in seconds. + Default lifetime is 120 seconds. + + """ + def __init__(self, default_factory=None, lifetime=None): + if lifetime is None: + lifetime = 120 + self._lifetime = lifetime + self._last_update = None + if default_factory is None: + default_factory = _default_factory_func + self._default_factory = default_factory + self._data = default_factory() + + @property + def is_valid(self): + """Is cache valid to use. + + Return: + bool: True if cache is valid, False otherwise. + + """ + if self._last_update is None: + return False + + return (time.time() - self._last_update) < self._lifetime + + def set_lifetime(self, lifetime): + """Change lifetime of cache item. + + Args: + lifetime (int): Lifetime of the cache data in seconds. + """ + + self._lifetime = lifetime + + def set_invalid(self): + """Set cache as invalid.""" + + self._last_update = None + + def reset(self): + """Set cache as invalid and reset data.""" + + self._last_update = None + self._data = self._default_factory() + + def get_data(self): + """Receive cached data. + + Returns: + Any: Any data that are cached. + + """ + return self._data + + def update_data(self, data): + """Update cache data. + + Args: + data (Any): Any data that are cached. + + """ + self._data = data + self._last_update = time.time() + + +class NestedCacheItem: + """Helper for cached items stored in nested structure. + + Example: + >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) + >>> cache["a"]["b"].is_valid + False + >>> cache["a"]["b"].get_data() + 0 + >>> cache["a"]["b"] = 1 + >>> cache["a"]["b"].is_valid + True + >>> cache["a"]["b"].get_data() + 1 + >>> cache.reset() + >>> cache["a"]["b"].is_valid + False + + Args: + levels (int): Number of nested levels where read cache is stored. + default_factory (Optional[callable]): Function that returns default + value used on init and on reset. + lifetime (Optional[int]): Lifetime of the cache data in seconds. + Default value is based on default value of 'CacheItem'. + _init_info (Optional[InitInfo]): Private argument. Init info for + nested cache where created from parent item. + + """ + def __init__( + self, levels=1, default_factory=None, lifetime=None, _init_info=None + ): + if levels < 1: + raise ValueError("Nested levels must be greater than 0") + self._data_by_key = {} + if _init_info is None: + _init_info = InitInfo(default_factory, lifetime) + self._init_info = _init_info + self._levels = levels + + def __getitem__(self, key): + """Get cached data. + + Args: + key (str): Key of the cache item. + + Returns: + Union[NestedCacheItem, CacheItem]: Cache item. 
+
+        """
+        cache = self._data_by_key.get(key)
+        if cache is None:
+            if self._levels > 1:
+                cache = NestedCacheItem(
+                    levels=self._levels - 1,
+                    _init_info=self._init_info
+                )
+            else:
+                cache = CacheItem(
+                    self._init_info.default_factory,
+                    self._init_info.lifetime
+                )
+            self._data_by_key[key] = cache
+        return cache
+
+    def __setitem__(self, key, value):
+        """Update cached data.
+
+        Args:
+            key (str): Key of the cache item.
+            value (Any): Any data that are cached.
+
+        """
+        if self._levels > 1:
+            raise AttributeError((
+                "{} does not support '__setitem__'. Lower nested level by {}"
+            ).format(self.__class__.__name__, self._levels - 1))
+        cache = self[key]
+        cache.update_data(value)
+
+    def get(self, key):
+        """Get cached data.
+
+        Args:
+            key (str): Key of the cache item.
+
+        Returns:
+            Union[NestedCacheItem, CacheItem]: Cache item.
+
+        """
+        return self[key]
+
+    def cached_count(self):
+        """Amount of cached items.
+
+        Returns:
+            int: Amount of cached items.
+
+        """
+        return len(self._data_by_key)
+
+    def clear_key(self, key):
+        """Clear cached item by key.
+
+        Args:
+            key (str): Key of the cache item.
+
+        """
+        self._data_by_key.pop(key, None)
+
+    def clear_invalid(self):
+        """Clear all invalid cache items.
+
+        Note:
+            To clear all cache items use 'reset'.
+
+        """
+        changed = {}
+        children_are_nested = self._levels > 1
+        for key, cache in tuple(self._data_by_key.items()):
+            if children_are_nested:
+                output = cache.clear_invalid()
+                if output:
+                    changed[key] = output
+                if not cache.cached_count():
+                    self._data_by_key.pop(key)
+            elif not cache.is_valid:
+                changed[key] = cache.get_data()
+                self._data_by_key.pop(key)
+        return changed
+
+    def reset(self):
+        """Reset cache.
+
+        Note:
+            To clear only invalid cache items use 'clear_invalid'.
+
+        """
+        self._data_by_key = {}
+
+    def set_lifetime(self, lifetime):
+        """Change lifetime of all children cache items.
+
+        Args:
+            lifetime (int): Lifetime of the cache data in seconds.
+
+        """
+        # 'InitInfo' is a namedtuple and therefore immutable, so use
+        # '_replace' to store an updated copy instead of assigning
+        # to the attribute
+        self._init_info = self._init_info._replace(lifetime=lifetime)
+        for cache in self._data_by_key.values():
+            cache.set_lifetime(lifetime)
+
+    @property
+    def is_valid(self):
+        """Raise reasonable error when called on wrong level.
+
+        Raises:
+            AttributeError: If called on nested cache item.
+
+        """
+        raise AttributeError((
+            "{} does not support 'is_valid'. Lower nested level by '{}'"
+        ).format(self.__class__.__name__, self._levels))
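NestedCacheItem's docstring already carries a doctest; for symmetry, here is a minimal standalone sketch of the flat CacheItem usage (no AYON server needed):

    from ayon_core.lib import CacheItem

    cache = CacheItem(default_factory=list, lifetime=60)
    assert not cache.is_valid       # nothing cached yet
    cache.update_data(["entity_a"])  # stores data and stamps the time
    assert cache.is_valid           # valid for the next 60 seconds
    print(cache.get_data())         # -> ['entity_a']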
Lower nested level by '{}'" + ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/modules/deadline/__init__.py b/client/ayon_core/modules/deadline/__init__.py index 5631e501d8..683d8dbe4a 100644 --- a/client/ayon_core/modules/deadline/__init__.py +++ b/client/ayon_core/modules/deadline/__init__.py @@ -1,6 +1,8 @@ from .deadline_module import DeadlineModule +from .version import __version__ __all__ = ( "DeadlineModule", + "__version__" ) diff --git a/client/ayon_core/modules/deadline/abstract_submit_deadline.py b/client/ayon_core/modules/deadline/abstract_submit_deadline.py index 2e0518ae20..00e51100bc 100644 --- a/client/ayon_core/modules/deadline/abstract_submit_deadline.py +++ b/client/ayon_core/modules/deadline/abstract_submit_deadline.py @@ -49,6 +49,10 @@ def requests_post(*args, **kwargs): if 'verify' not in kwargs: kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa + + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # explicit cast to tuple # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.post(*args, **kwargs) @@ -70,6 +74,9 @@ def requests_get(*args, **kwargs): if 'verify' not in kwargs: kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa + auth = kwargs.get("auth") + if auth: + kwargs["auth"] = tuple(auth) # add 10sec timeout before bailing out kwargs['timeout'] = 10 return requests.get(*args, **kwargs) @@ -434,9 +441,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """Plugin entry point.""" self._instance = instance context = instance.context - self._deadline_url = context.data.get("defaultDeadline") - self._deadline_url = instance.data.get( - "deadlineUrl", self._deadline_url) + self._deadline_url = instance.data["deadline"]["url"] assert self._deadline_url, "Requires Deadline Webservice URL" @@ -460,7 +465,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, self.plugin_info = self.get_plugin_info() self.aux_files = self.get_aux_files() - job_id = self.process_submission() + auth = instance.data["deadline"]["auth"] + job_id = self.process_submission(auth) self.log.info("Submitted job to Deadline: {}.".format(job_id)) # TODO: Find a way that's more generic and not render type specific @@ -473,10 +479,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, job_info=render_job_info, plugin_info=render_plugin_info ) - render_job_id = self.submit(payload) + render_job_id = self.submit(payload, auth) self.log.info("Render job id: %s", render_job_id) - def process_submission(self): + def process_submission(self, auth=None): """Process data for submission. This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload @@ -487,7 +493,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ payload = self.assemble_payload() - return self.submit(payload) + return self.submit(payload, auth) @abstractmethod def get_job_info(self): @@ -577,7 +583,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, "AuxFiles": aux_files or self.aux_files } - def submit(self, payload): + def submit(self, payload, auth): """Submit payload to Deadline API end-point. This takes payload in the form of JSON file and POST it to @@ -585,6 +591,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, Args: payload (dict): dict to become json in deadline submission. + auth (tuple): (username, password) Returns: str: resulting Deadline job id. 
@@ -594,7 +601,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin, """ url = "{}/api/jobs".format(self._deadline_url) - response = requests_post(url, json=payload) + response = requests_post(url, json=payload, + auth=auth) if not response.ok: self.log.error("Submission failed!") self.log.error(response.status_code) diff --git a/client/ayon_core/modules/deadline/deadline_module.py b/client/ayon_core/modules/deadline/deadline_module.py index c0ba83477e..b1089bbfe2 100644 --- a/client/ayon_core/modules/deadline/deadline_module.py +++ b/client/ayon_core/modules/deadline/deadline_module.py @@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths): def initialize(self, studio_settings): # This module is always enabled - deadline_urls = {} + deadline_servers_info = {} enabled = self.name in studio_settings if enabled: deadline_settings = studio_settings[self.name] - deadline_urls = { - url_item["name"]: url_item["value"] + deadline_servers_info = { + url_item["name"]: url_item for url_item in deadline_settings["deadline_urls"] } - if enabled and not deadline_urls: + if enabled and not deadline_servers_info: enabled = False self.log.warning(( "Deadline Webservice URLs are not specified. Disabling addon." )) self.enabled = enabled - self.deadline_urls = deadline_urls + self.deadline_servers_info = deadline_servers_info def get_plugin_paths(self): """Deadline plugin paths.""" @@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths): } @staticmethod - def get_deadline_pools(webservice, log=None): + def get_deadline_pools(webservice, auth=None, log=None): """Get pools from Deadline. Args: webservice (str): Server url. - log (Logger) + auth (Optional[Tuple[str, str]]): Tuple containing username, + password + log (Optional[Logger]): Logger to log errors to, if provided. Returns: - list: Pools. + List[str]: Pools. Throws: RuntimeError: If deadline webservice is unreachable. @@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths): argument = "{}/api/pools?NamesOnly=true".format(webservice) try: - response = requests_get(argument) + kwargs = {} + if auth: + kwargs["auth"] = auth + response = requests_get(argument, **kwargs) except requests.exceptions.ConnectionError as exc: msg = 'Cannot connect to DL web service {}'.format(webservice) log.error(msg) diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index ea4b7a213e..22022831a0 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/client/ayon_core/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" # Run before collect_render. 
-    order = pyblish.api.CollectorOrder + 0.005
+    order = pyblish.api.CollectorOrder + 0.225
     label = "Deadline Webservice from the Instance"
-    families = ["rendering", "renderlayer"]
-    hosts = ["maya"]
+    targets = ["local"]
+    families = ["render",
+                "rendering",
+                "render.farm",
+                "renderFarm",
+                "renderlayer",
+                "maxrender",
+                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "mantra_rop",
+                "karma_rop",
+                "vray_rop",
+                "publish.hou",
+                "image"]  # for Fusion

     def process(self, instance):
-        instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
-        instance.data["deadlineUrl"] = \
-            instance.data["deadlineUrl"].strip().rstrip("/")
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        if not instance.data.get("deadline"):
+            instance.data["deadline"] = {}
+
+        # TODO: this separate logic should be removed once all hosts
+        #  share the same approach
+        host_name = instance.context.data["hostName"]
+        if host_name == "maya":
+            deadline_url = self._collect_deadline_url(instance)
+        else:
+            deadline_url = (instance.data.get("deadlineUrl") or  # backwards
+                            instance.data.get("deadline", {}).get("url"))
+        if deadline_url:
+            instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/")
+        else:
+            instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"]  # noqa
         self.log.debug(
-            "Using {} for submission.".format(instance.data["deadlineUrl"]))
+            "Using {} for submission".format(instance.data["deadline"]["url"]))

     def _collect_deadline_url(self, render_instance):
         # type: (pyblish.api.Instance) -> str
@@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
             ["project_settings"]
             ["deadline"]
         )
-
-        default_server = render_instance.context.data["defaultDeadline"]
+        default_server_url = (render_instance.context.data["deadline"]
+                              ["defaultUrl"])
         # QUESTION How and where is this is set? Should be removed?
         instance_server = render_instance.data.get("deadlineServers")
         if not instance_server:
             self.log.debug("Using default server.")
-            return default_server
+            return default_server_url

         # Get instance server as sting.
         if isinstance(instance_server, int):
@@ -66,7 +94,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):

         default_servers = {
             url_item["name"]: url_item["value"]
             for url_item in deadline_settings["deadline_urls"]
         }
         project_servers = (
             render_instance.context.data
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
index b7ca227b01..9238e0ed95 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
     """

     # Run before collect_deadline_server_instance.
-    order = pyblish.api.CollectorOrder + 0.0025
+    order = pyblish.api.CollectorOrder + 0.200
     label = "Default Deadline Webservice"
-
-    pass_mongo_url = False
+    targets = ["local"]

     def process(self, context):
         try:
@@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
             deadline_settings = context.data["project_settings"]["deadline"]
             deadline_server_name = deadline_settings["deadline_server"]

-            deadline_webservice = None
+            dl_server_info = None
             if deadline_server_name:
-                deadline_webservice = deadline_module.deadline_urls.get(
+                dl_server_info = deadline_module.deadline_servers_info.get(
                     deadline_server_name)

-            default_deadline_webservice = deadline_module.deadline_urls["default"]
-            deadline_webservice = (
-                deadline_webservice
-                or default_deadline_webservice
-            )
+            if dl_server_info:
+                deadline_url = dl_server_info["value"]
+            else:
+                # 'deadline_servers_info' is keyed by server name
+                default_dl_server_info = (
+                    deadline_module.deadline_servers_info["default"])
+                deadline_url = default_dl_server_info["value"]

-            context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/")  # noqa
+            context.data["deadline"] = {}
+            context.data["deadline"]["defaultUrl"] = (
+                deadline_url.strip().rstrip("/"))
diff --git a/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
new file mode 100644
index 0000000000..5d03523c89
--- /dev/null
+++ b/client/ayon_core/modules/deadline/plugins/publish/collect_user_credentials.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""Collect user credentials
+
+Requires:
+    context -> project_settings
+    instance.data["deadline"]["url"]
+
+Provides:
+    instance.data["deadline"] -> require_authentication (bool)
+    instance.data["deadline"] -> auth (tuple (str, str)) -
+        (username, password) or None
+"""
+import pyblish.api
+
+from ayon_api import get_server_api_connection
+from ayon_core.modules.deadline.deadline_module import DeadlineModule
+from ayon_core.modules.deadline import __version__
+
+
+class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin):
+    """Collects username and password for artist if DL requires authentication
+    """
+    order = pyblish.api.CollectorOrder + 0.250
+    label = "Collect Deadline User Credentials"
+
+    targets = ["local"]
+
+    hosts = ["aftereffects",
+             "blender",
+             "fusion",
+             "harmony",
+             "nuke",
+             "maya",
+             "max",
+             "houdini"]
+
+    families = ["render",
+                "rendering",
+                "render.farm",
+                "renderFarm",
+                "renderlayer",
+                "maxrender",
+                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "mantra_rop",
+                "karma_rop",
+                "vray_rop",
+                "publish.hou"]
+
+    def process(self, instance):
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        collected_deadline_url = instance.data["deadline"]["url"]
+        if not collected_deadline_url:
+            raise ValueError("Instance doesn't have '[deadline][url]'.")
+        context_data = instance.context.data
+        deadline_settings = context_data["project_settings"]["deadline"]
+
+        deadline_server_name = None
+        # deadline url might be set directly from instance, need to find
+        # metadata for it
+        for deadline_info in deadline_settings["deadline_urls"]:
+            dl_settings_url = deadline_info["value"].strip().rstrip("/")
+            if dl_settings_url == collected_deadline_url:
+                deadline_server_name = deadline_info["name"]
+                break
+
+        if not deadline_server_name:
+            raise ValueError(f"Collected {collected_deadline_url} doesn't "
+                             "match any site configured in Studio Settings")
+
instance.data["deadline"]["require_authentication"] = ( + deadline_info["require_authentication"] + ) + instance.data["deadline"]["auth"] = None + + if not deadline_info["require_authentication"]: + return + # TODO import 'get_addon_site_settings' when available + # in public 'ayon_api' + local_settings = get_server_api_connection().get_addon_site_settings( + DeadlineModule.name, __version__) + local_settings = local_settings["local_settings"] + for server_info in local_settings: + if deadline_server_name == server_info["server_name"]: + instance.data["deadline"]["auth"] = (server_info["username"], + server_info["password"]) diff --git a/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml new file mode 100644 index 0000000000..eec05df08a --- /dev/null +++ b/client/ayon_core/modules/deadline/plugins/publish/help/validate_deadline_connection.xml @@ -0,0 +1,17 @@ + + + + Deadline Authentication + +## Deadline authentication is required + +This project has set in Settings that Deadline requires authentication. + +### How to repair? + +Please go to Ayon Server > Site Settings and provide your Deadline username and password. +In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator. + + + + \ No newline at end of file diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py index ab342c1a9d..f5805beb5c 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_blender_deadline.py @@ -174,7 +174,8 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, instance.data["toBeRenderedOn"] = "deadline" payload = self.assemble_payload() - return self.submit(payload) + return self.submit(payload, + auth=instance.data["deadline"]["auth"]) def from_published_scene(self): """ diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py index 1fae23c9b2..2220442dac 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -2,9 +2,10 @@ import os import re import json import getpass -import requests import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post + class CelactionSubmitDeadline(pyblish.api.InstancePlugin): """Submit CelAction2D scene to Deadline @@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -197,7 +194,8 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - response = requests.post(self.deadline_url, json=payload) + response = requests_post(self.deadline_url, json=payload, + 
auth=instance.data["deadline"]["require_authentication"]) if not response.ok: self.log.error( diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py index e3a4cd8030..e9b93a47cd 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_fusion_deadline.py @@ -2,17 +2,13 @@ import os import json import getpass -import requests - import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline.publish import ( AYONPyblishPluginMixin ) -from ayon_core.lib import ( - BoolDef, - NumberDef, -) +from ayon_core.lib import NumberDef class FusionSubmitDeadline( @@ -64,11 +60,6 @@ class FusionSubmitDeadline( decimals=0, minimum=1, maximum=10 - ), - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" ) ] @@ -80,10 +71,6 @@ class FusionSubmitDeadline( attribute_values = self.get_attr_values_from_data( instance.data) - # add suspend_publish attributeValue to instance data - instance.data["suspend_publish"] = attribute_values[ - "suspend_publish"] - context = instance.context key = "__hasRun{}".format(self.__class__.__name__) @@ -94,11 +81,7 @@ class FusionSubmitDeadline( from ayon_core.hosts.fusion.api.lib import get_frame_path - # get default deadline webservice url from deadline module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" # Collect all saver instances in context that are to be rendered @@ -258,7 +241,8 @@ class FusionSubmitDeadline( # E.g. 
http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(deadline_url) - response = requests.post(url, json=payload) + auth = instance.data["deadline"]["auth"] + response = requests_post(url, json=payload, auth=auth) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 6952604293..597a3cfc55 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from ayon_core.lib import ( is_in_tests, - BoolDef, TextDef, NumberDef ) @@ -90,11 +89,6 @@ class HoudiniSubmitDeadline( @classmethod def get_attribute_defs(cls): return [ - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" - ), NumberDef( "priority", label="Priority", diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py index cba05f6948..e9f6c382c5 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_max_deadline.py @@ -187,11 +187,13 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, payload_data, project_settings) job_infos, plugin_infos = payload for job_info, plugin_info in zip(job_infos, plugin_infos): - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit(self.assemble_payload(job_info, plugin_info), + instance.data["deadline"]["auth"]) else: payload = self._use_published_name(payload_data, project_settings) job_info, plugin_info = payload - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit(self.assemble_payload(job_info, plugin_info), + instance.data["deadline"]["auth"]) def _use_published_name(self, data, project_settings): # Not all hosts can import these modules. 
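Before the Maya submitter below, it is worth summarizing the per-instance data contract that the collectors above establish and that every submitter now relies on; a sketch with illustrative values:

    # Illustrative shape of instance.data["deadline"] assembled by the
    # collectors; "auth" stays None when the server does not require
    # authentication.
    deadline_data = {
        "url": "http://deadline.example.com:8082",
        "require_authentication": True,
        "auth": ("artist_name", "secret"),
    }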
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py index 0300b12104..250dc8b7ea 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -292,7 +292,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, return plugin_payload - def process_submission(self): + def process_submission(self, auth=None): from maya import cmds instance = self._instance @@ -332,7 +332,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, if "vrayscene" in instance.data["families"]: self.log.debug("Submitting V-Ray scene render..") vray_export_payload = self._get_vray_export_payload(payload_data) - export_job = self.submit(vray_export_payload) + export_job = self.submit(vray_export_payload, + instance.data["deadline"]["auth"]) payload = self._get_vray_render_payload(payload_data) @@ -351,7 +352,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, else: # Submit main render job job_info, plugin_info = payload - self.submit(self.assemble_payload(job_info, plugin_info)) + self.submit(self.assemble_payload(job_info, plugin_info), + instance.data["deadline"]["auth"]) def _tile_render(self, payload): """Submit as tile render per frame with dependent assembly jobs.""" @@ -451,7 +453,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, # Submit frame tile jobs frame_tile_job_id = {} for frame, tile_job_payload in frame_payloads.items(): - job_id = self.submit(tile_job_payload) + job_id = self.submit(tile_job_payload, + instance.data["deadline"]["auth"]) frame_tile_job_id[frame] = job_id # Define assembly payloads @@ -559,7 +562,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, "submitting assembly job {} of {}".format(i + 1, num_assemblies) ) - assembly_job_id = self.submit(payload) + assembly_job_id = self.submit(payload, + instance.data["deadline"]["auth"]) assembly_job_ids.append(assembly_job_id) instance.data["assemblySubmissionJobs"] = assembly_job_ids diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py index d70cb75bf3..ef744ae1e1 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -4,9 +4,9 @@ import json import getpass from datetime import datetime -import requests import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline.publish import ( AYONPyblishPluginMixin ) @@ -76,11 +76,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, default=cls.use_gpu, label="Use GPU" ), - BoolDef( - "suspend_publish", - default=False, - label="Suspend publish" - ), BoolDef( "workfile_dependency", default=cls.workfile_dependency, @@ -100,20 +95,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, instance.data["attributeValues"] = self.get_attr_values_from_data( instance.data) - # add suspend_publish attributeValue to instance data - instance.data["suspend_publish"] = instance.data["attributeValues"][ - "suspend_publish"] - families = instance.data["families"] node = instance.data["transientData"]["node"] context = instance.context - # get default deadline webservice url from deadline 
module - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") + deadline_url = instance.data["deadline"]["url"] assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) @@ -436,7 +423,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin, self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - response = requests.post(self.deadline_url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + response = requests_post(self.deadline_url, json=payload, timeout=10, + auth=auth) if not response.ok: raise Exception(response.text) diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py index 4e4657d886..ce15eda9a0 100644 --- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py +++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_cache_job.py @@ -5,10 +5,10 @@ import json import re from copy import deepcopy -import requests import ayon_api import pyblish.api +from openpype_modules.deadline.abstract_submit_deadline import requests_post from ayon_core.pipeline import publish from ayon_core.lib import EnumDef, is_in_tests from ayon_core.pipeline.version_start import get_versioning_start @@ -147,9 +147,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, instance_settings = self.get_attr_values_from_data(instance.data) initial_status = instance_settings.get("publishJobState", "Active") - # TODO: Remove this backwards compatibility of `suspend_publish` - if instance.data.get("suspend_publish"): - initial_status = "Suspended" args = [ "--headless", @@ -212,7 +209,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, self.log.debug("Submitting Deadline publish job ...") url = "{}/api/jobs".format(self.deadline_url) - response = requests.post(url, json=payload, timeout=10) + auth = instance.data["deadline"]["auth"] + response = requests_post(url, json=payload, timeout=10, + auth=auth) if not response.ok: raise Exception(response.text) @@ -344,11 +343,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, deadline_publish_job_id = None if submission_type == "deadline": - # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") + self.deadline_url = instance.data["deadline"]["url"] assert self.deadline_url, "Requires Deadline Webservice URL" deadline_publish_job_id = \ @@ -356,7 +351,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin, # Inject deadline url to instances. 
for inst in instances:
-            inst["deadlineUrl"] = self.deadline_url
+            if "deadline" not in inst:
+                inst["deadline"] = {}
+            inst["deadline"].update(instance.data["deadline"])
 
        # publish job file
        publish_job = {
diff --git a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
index 8def9cc63c..0f505dce78 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/submit_publish_job.py
@@ -5,11 +5,11 @@ import json
 import re
 from copy import deepcopy
 
-import requests
 import clique
 import ayon_api
 import pyblish.api
 
+from openpype_modules.deadline.abstract_submit_deadline import requests_post
 from ayon_core.pipeline import publish
 from ayon_core.lib import EnumDef, is_in_tests
 from ayon_core.pipeline.version_start import get_versioning_start
@@ -88,9 +88,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
    hosts = ["fusion", "max", "maya", "nuke", "houdini",
             "celaction", "aftereffects", "harmony", "blender"]
 
-    families = ["render.farm", "render.frames_farm",
-                "prerender.farm", "prerender.frames_farm",
-                "renderlayer", "imagesequence",
+    families = ["render", "render.farm", "render.frames_farm",
+                "prerender", "prerender.farm", "prerender.frames_farm",
+                "renderlayer", "imagesequence", "image",
                "vrayscene", "maxrender",
                "arnold_rop", "mantra_rop",
                "karma_rop", "vray_rop",
@@ -224,9 +224,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
        instance_settings = self.get_attr_values_from_data(instance.data)
        initial_status = instance_settings.get("publishJobState", "Active")
-        # TODO: Remove this backwards compatibility of `suspend_publish`
-        if instance.data.get("suspend_publish"):
-            initial_status = "Suspended"
 
        args = [
            "--headless",
@@ -306,7 +303,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
        self.log.debug("Submitting Deadline publish job ...")
 
        url = "{}/api/jobs".format(self.deadline_url)
-        response = requests.post(url, json=payload, timeout=10)
+        auth = instance.data["deadline"]["auth"]
+        response = requests_post(url, json=payload, timeout=10,
+                                 auth=auth)
        if not response.ok:
            raise Exception(response.text)
@@ -314,7 +313,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
 
        return deadline_publish_job_id
 
-
    def process(self, instance):
        # type: (pyblish.api.Instance) -> None
        """Process plugin.
@@ -461,18 +459,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
        }
 
        # get default deadline webservice url from deadline module
-        self.deadline_url = instance.context.data["defaultDeadline"]
-        # if custom one is set in instance, use that
-        if instance.data.get("deadlineUrl"):
-            self.deadline_url = instance.data.get("deadlineUrl")
+        self.deadline_url = instance.data["deadline"]["url"]
        assert self.deadline_url, "Requires Deadline Webservice URL"
 
        deadline_publish_job_id = \
            self._submit_deadline_post_job(instance, render_job, instances)
 
-        # Inject deadline url to instances.
+        # Inject deadline info to instances so they can query DL job ids for overrides
        for inst in instances:
-            inst["deadlineUrl"] = self.deadline_url
+            inst["deadline"] = instance.data["deadline"]
 
        # publish job file
        publish_job = {
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
index a7b300beff..8fffd47786 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_connection.py
@@ -1,5 +1,7 @@
 import pyblish.api
 
+from ayon_core.pipeline import PublishXmlValidationError
+
 from openpype_modules.deadline.abstract_submit_deadline import requests_get
 
 
@@ -8,27 +10,42 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
 
    label = "Validate Deadline Web Service"
    order = pyblish.api.ValidatorOrder
-    hosts = ["maya", "nuke"]
-    families = ["renderlayer", "render"]
+    hosts = ["maya", "nuke", "aftereffects", "harmony", "fusion"]
+    families = ["renderlayer", "render", "render.farm"]
 
    # cache
    responses = {}
 
    def process(self, instance):
-        # get default deadline webservice url from deadline module
-        deadline_url = instance.context.data["defaultDeadline"]
-        # if custom one is set in instance, use that
-        if instance.data.get("deadlineUrl"):
-            deadline_url = instance.data.get("deadlineUrl")
-            self.log.debug(
-                "We have deadline URL on instance {}".format(deadline_url)
-            )
+        if not instance.data.get("farm"):
+            self.log.debug("Should not be processed on farm, skipping.")
+            return
+
+        deadline_url = instance.data["deadline"]["url"]
        assert deadline_url, "Requires Deadline Webservice URL"
 
+        kwargs = {}
+        if instance.data["deadline"]["require_authentication"]:
+            auth = instance.data["deadline"]["auth"]
+            kwargs["auth"] = auth
+
+            if not auth[0]:
+                raise PublishXmlValidationError(
+                    self,
+                    "Deadline requires authentication. "
+                    "At least username is required to be set in "
+                    "Site Settings.")
+
        if deadline_url not in self.responses:
-            self.responses[deadline_url] = requests_get(deadline_url)
+            self.responses[deadline_url] = requests_get(deadline_url, **kwargs)
 
        response = self.responses[deadline_url]
+        if response.status_code == 401:
+            raise PublishXmlValidationError(
+                self,
+                "Deadline requires authentication. "
+                "Provided credentials are not working. "
+                "Please change them in Site Settings")
        assert response.ok, "Response must be ok"
        assert response.text.startswith("Deadline Web Service "), (
            "Web service did not respond with 'Deadline Web Service'"
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
index 2feb044cf1..2fb511bf51 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_deadline_pools.py
@@ -37,8 +37,9 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
            self.log.debug("Skipping local instance.")
            return
 
-        deadline_url = self.get_deadline_url(instance)
-        pools = self.get_pools(deadline_url)
+        deadline_url = instance.data["deadline"]["url"]
+        pools = self.get_pools(deadline_url,
+                               instance.data["deadline"].get("auth"))
 
        invalid_pools = {}
        primary_pool = instance.data.get("primaryPool")
@@ -61,22 +62,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
            formatting_data={"pools_str": ", ".join(pools)}
        )
 
-    def get_deadline_url(self, instance):
-        # get default deadline webservice url from deadline module
-        deadline_url = instance.context.data["defaultDeadline"]
-        if instance.data.get("deadlineUrl"):
-            # if custom one is set in instance, use that
-            deadline_url = instance.data.get("deadlineUrl")
-        return deadline_url
-
-    def get_pools(self, deadline_url):
+    def get_pools(self, deadline_url, auth):
        if deadline_url not in self.pools_per_url:
            self.log.debug(
                "Querying available pools for Deadline url: {}".format(
                    deadline_url)
            )
            pools = DeadlineModule.get_deadline_pools(deadline_url,
+                                                      auth=auth,
                                                      log=self.log)
+            # "none" is a valid pool but some Deadline servers do not list it
+            if "none" not in pools:
+                pools.append("none")
            self.log.info("Available pools: {}".format(pools))
            self.pools_per_url[deadline_url] = pools
diff --git a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
index 6263526d5c..83e867408c 100644
--- a/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
+++ b/client/ayon_core/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py
@@ -199,16 +199,16 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
            (dict): Job info from Deadline
        """
-        # get default deadline webservice url from deadline module
-        deadline_url = instance.context.data["defaultDeadline"]
-        # if custom one is set in instance, use that
-        if instance.data.get("deadlineUrl"):
-            deadline_url = instance.data.get("deadlineUrl")
+        deadline_url = instance.data["deadline"]["url"]
        assert deadline_url, "Requires Deadline Webservice URL"
 
        url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
        try:
-            response = requests_get(url)
+            kwargs = {}
+            auth = instance.data["deadline"]["auth"]
+            if auth:
+                kwargs["auth"] = auth
+            response = requests_get(url, **kwargs)
        except requests.exceptions.ConnectionError:
            self.log.error("Deadline is not accessible at "
                           "{}".format(deadline_url))
diff --git a/server_addon/deadline/server/version.py b/client/ayon_core/modules/deadline/version.py
similarity index 100%
rename from server_addon/deadline/server/version.py
rename to client/ayon_core/modules/deadline/version.py
diff --git a/client/ayon_core/pipeline/__init__.py b/client/ayon_core/pipeline/__init__.py
index d1a181a353..8fd00ee6b6 100644
--- a/client/ayon_core/pipeline/__init__.py
+++ 
b/client/ayon_core/pipeline/__init__.py @@ -97,6 +97,15 @@ from .context_tools import ( get_current_folder_path, get_current_task_name ) + +from .workfile import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + install = install_host uninstall = uninstall_host @@ -198,6 +207,13 @@ __all__ = ( "get_current_folder_path", "get_current_task_name", + # Workfile templates + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", + # Backwards compatible function names "install", "uninstall", diff --git a/client/ayon_core/pipeline/anatomy/anatomy.py b/client/ayon_core/pipeline/anatomy/anatomy.py index 2aa8eeddbc..98bbaa9bdc 100644 --- a/client/ayon_core/pipeline/anatomy/anatomy.py +++ b/client/ayon_core/pipeline/anatomy/anatomy.py @@ -3,11 +3,16 @@ import re import copy import platform import collections -import time import ayon_api -from ayon_core.lib import Logger, get_local_site_id, StringTemplate +from ayon_core.lib import ( + Logger, + get_local_site_id, + StringTemplate, + CacheItem, + NestedCacheItem, +) from ayon_core.addon import AddonsManager from .exceptions import RootCombinationError, ProjectNotSet @@ -397,62 +402,11 @@ class BaseAnatomy(object): ) -class CacheItem: - """Helper to cache data. - - Helper does not handle refresh of data and does not mark data as outdated. - Who uses the object should check of outdated state on his own will. - """ - - default_lifetime = 10 - - def __init__(self, lifetime=None): - self._data = None - self._cached = None - self._lifetime = lifetime or self.default_lifetime - - @property - def data(self): - """Cached data/object. - - Returns: - Any: Whatever was cached. - """ - - return self._data - - @property - def is_outdated(self): - """Item has outdated cache. - - Lifetime of cache item expired or was not yet set. - - Returns: - bool: Item is outdated. - """ - - if self._cached is None: - return True - return (time.time() - self._cached) > self._lifetime - - def update_data(self, data): - """Update cache of data. - - Args: - data (Any): Data to cache. 
- """ - - self._data = data - self._cached = time.time() - - class Anatomy(BaseAnatomy): - _sitesync_addon_cache = CacheItem() - _project_cache = collections.defaultdict(CacheItem) - _default_site_id_cache = collections.defaultdict(CacheItem) - _root_overrides_cache = collections.defaultdict( - lambda: collections.defaultdict(CacheItem) - ) + _project_cache = NestedCacheItem(lifetime=10) + _sitesync_addon_cache = CacheItem(lifetime=60) + _default_site_id_cache = NestedCacheItem(lifetime=60) + _root_overrides_cache = NestedCacheItem(2, lifetime=60) def __init__( self, project_name=None, site_name=None, project_entity=None @@ -477,18 +431,18 @@ class Anatomy(BaseAnatomy): @classmethod def get_project_entity_from_cache(cls, project_name): project_cache = cls._project_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data(ayon_api.get_project(project_name)) - return copy.deepcopy(project_cache.data) + return copy.deepcopy(project_cache.get_data()) @classmethod def get_sitesync_addon(cls): - if cls._sitesync_addon_cache.is_outdated: + if not cls._sitesync_addon_cache.is_valid: manager = AddonsManager() cls._sitesync_addon_cache.update_data( manager.get_enabled_addon("sitesync") ) - return cls._sitesync_addon_cache.data + return cls._sitesync_addon_cache.get_data() @classmethod def _get_studio_roots_overrides(cls, project_name): @@ -533,14 +487,14 @@ class Anatomy(BaseAnatomy): elif not site_name: # Use sync server to receive active site name project_cache = cls._default_site_id_cache[project_name] - if project_cache.is_outdated: + if not project_cache.is_valid: project_cache.update_data( sitesync_addon.get_active_site_type(project_name) ) - site_name = project_cache.data + site_name = project_cache.get_data() site_cache = cls._root_overrides_cache[project_name][site_name] - if site_cache.is_outdated: + if not site_cache.is_valid: if site_name == "studio": # Handle studio root overrides without sync server # - studio root overrides can be done even without sync server @@ -553,4 +507,4 @@ class Anatomy(BaseAnatomy): project_name, site_name ) site_cache.update_data(roots_overrides) - return site_cache.data + return site_cache.get_data() diff --git a/client/ayon_core/pipeline/create/context.py b/client/ayon_core/pipeline/create/context.py index b8618738fb..7615ce6aee 100644 --- a/client/ayon_core/pipeline/create/context.py +++ b/client/ayon_core/pipeline/create/context.py @@ -1987,12 +1987,12 @@ class CreateContext: "Folder '{}' was not found".format(folder_path) ) - task_name = None if task_entity is None: - task_name = self.get_current_task_name() - task_entity = ayon_api.get_task_by_name( - project_name, folder_entity["id"], task_name - ) + current_task_name = self.get_current_task_name() + if current_task_name: + task_entity = ayon_api.get_task_by_name( + project_name, folder_entity["id"], current_task_name + ) if pre_create_data is None: pre_create_data = {} @@ -2018,7 +2018,7 @@ class CreateContext: instance_data = { "folderPath": folder_entity["path"], - "task": task_name, + "task": task_entity["name"] if task_entity else None, "productType": creator.product_type, "variant": variant } @@ -2053,7 +2053,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) - except: + except: # noqa: E722 add_traceback = True exc_info = sys.exc_info() self.log.warning( @@ -2163,7 +2163,7 @@ class CreateContext: exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, 
exc_info[1]))
-        except:
+        except:  # noqa: E722
            failed = True
            add_traceback = True
            exc_info = sys.exc_info()
@@ -2197,7 +2197,7 @@ class CreateContext:
 
        try:
            convertor.find_instances()
-        except:
+        except:  # noqa: E722
            failed_info.append(
                prepare_failed_convertor_operation_info(
                    convertor.identifier, sys.exc_info()
@@ -2373,7 +2373,7 @@ class CreateContext:
            exc_info = sys.exc_info()
            self.log.warning(error_message.format(identifier,
                                                  exc_info[1]))
 
-        except:
+        except:  # noqa: E722
            failed = True
            add_traceback = True
            exc_info = sys.exc_info()
@@ -2440,7 +2440,7 @@ class CreateContext:
                error_message.format(identifier, exc_info[1])
            )
 
-        except:
+        except:  # noqa: E722
            failed = True
            add_traceback = True
            exc_info = sys.exc_info()
@@ -2546,7 +2546,7 @@ class CreateContext:
 
        try:
            self.run_convertor(convertor_identifier)
-        except:
+        except:  # noqa: E722
            failed_info.append(
                prepare_failed_convertor_operation_info(
                    convertor_identifier, sys.exc_info()
diff --git a/client/ayon_core/pipeline/farm/pyblish_functions.py b/client/ayon_core/pipeline/farm/pyblish_functions.py
index eb6f8569d9..72deee185e 100644
--- a/client/ayon_core/pipeline/farm/pyblish_functions.py
+++ b/client/ayon_core/pipeline/farm/pyblish_functions.py
@@ -225,6 +225,7 @@ def create_skeleton_instance(
    instance_skeleton_data = {
        "productType": product_type,
        "productName": data["productName"],
+        "task": data["task"],
        "families": families,
        "folderPath": data["folderPath"],
        "frameStart": time_data.start,
diff --git a/client/ayon_core/pipeline/publish/abstract_collect_render.py b/client/ayon_core/pipeline/publish/abstract_collect_render.py
index c50dc16380..17cab876b6 100644
--- a/client/ayon_core/pipeline/publish/abstract_collect_render.py
+++ b/client/ayon_core/pipeline/publish/abstract_collect_render.py
@@ -80,6 +80,7 @@ class RenderInstance(object):
    anatomyData = attr.ib(default=None)
    outputDir = attr.ib(default=None)
    context = attr.ib(default=None)
+    deadline = attr.ib(default=None)
 
    # The source instance the data of this render instance should merge into
    source_instance = attr.ib(default=None, type=pyblish.api.Instance)
@@ -215,13 +216,12 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
        # add additional data
        data = self.add_additional_data(data)
 
-        render_instance_dict = attr.asdict(render_instance)
-
        # Merge into source instance if provided, otherwise create instance
-        instance = render_instance_dict.pop("source_instance", None)
+        instance = render_instance.source_instance
        if instance is None:
            instance = context.create_instance(render_instance.name)
 
+        render_instance_dict = attr.asdict(render_instance)
        instance.data.update(render_instance_dict)
        instance.data.update(data)
diff --git a/client/ayon_core/pipeline/thumbnails.py b/client/ayon_core/pipeline/thumbnails.py
new file mode 100644
index 0000000000..dbb38615d8
--- /dev/null
+++ b/client/ayon_core/pipeline/thumbnails.py
@@ -0,0 +1,263 @@
+import os
+import time
+import collections
+
+import ayon_api
+
+from ayon_core.lib.local_settings import get_ayon_appdirs
+
+
+FileInfo = collections.namedtuple(
+    "FileInfo",
+    ("path", "size", "modification_time")
+)
+
+
+class ThumbnailsCache:
+    """Cache of thumbnails on local storage.
+
+    Thumbnails are cached to a predefined appdirs directory. Each project has
+    its own subfolder with thumbnails -> that's because each project has its
+    own thumbnail id validation and file names are thumbnail ids with a
+    matching extension. Extensions are predefined (.png and .jpeg).
+
+    Cache has a cleanup mechanism, triggered on initialization by default.
+
+    The cleanup has 2 levels:
+        1. soft cleanup which removes all files older than 'days_alive'
+        2. max size cleanup which removes files until the thumbnails folder
+            contains less than 'max_filesize'
+            - this is time consuming so it's not triggered automatically
+
+    Args:
+        cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
+    """
+
+    # Lifetime of thumbnails (in seconds)
+    # - default 3 days
+    days_alive = 3
+    # Max size of thumbnail directory (in bytes)
+    # - default 2 Gb
+    max_filesize = 2 * 1024 * 1024 * 1024
+
+    def __init__(self, cleanup=True):
+        self._thumbnails_dir = None
+        self._days_alive_secs = self.days_alive * 24 * 60 * 60
+        if cleanup:
+            self.cleanup()
+
+    def get_thumbnails_dir(self):
+        """Root directory where thumbnails are stored.
+
+        Returns:
+            str: Path to thumbnails root.
+        """
+
+        if self._thumbnails_dir is None:
+            self._thumbnails_dir = get_ayon_appdirs("thumbnails")
+        return self._thumbnails_dir
+
+    thumbnails_dir = property(get_thumbnails_dir)
+
+    def get_thumbnails_dir_file_info(self):
+        """Get information about all files in thumbnails directory.
+
+        Returns:
+            List[FileInfo]: List of file information about all files.
+        """
+
+        thumbnails_dir = self.thumbnails_dir
+        files_info = []
+        if not os.path.exists(thumbnails_dir):
+            return files_info
+
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                files_info.append(FileInfo(
+                    path, os.path.getsize(path), os.path.getmtime(path)
+                ))
+        return files_info
+
+    def get_thumbnails_dir_size(self, files_info=None):
+        """Get full size of thumbnail directory.
+
+        Args:
+            files_info (List[FileInfo]): Prepared file information about
+                files in thumbnail directory.
+
+        Returns:
+            int: File size of all files in thumbnail directory.
+        """
+
+        if files_info is None:
+            files_info = self.get_thumbnails_dir_file_info()
+
+        if not files_info:
+            return 0
+
+        return sum(
+            file_info.size
+            for file_info in files_info
+        )
+
+    def cleanup(self, check_max_size=False):
+        """Cleanup thumbnails directory.
+
+        Args:
+            check_max_size (bool): Also cleanup files to match max size of
+                thumbnails directory.
+        """
+
+        thumbnails_dir = self.get_thumbnails_dir()
+        # Skip if thumbnails dir does not exist yet
+        if not os.path.exists(thumbnails_dir):
+            return
+
+        self._soft_cleanup(thumbnails_dir)
+        if check_max_size:
+            self._max_size_cleanup(thumbnails_dir)
+
+    def _soft_cleanup(self, thumbnails_dir):
+        current_time = time.time()
+        for root, _, filenames in os.walk(thumbnails_dir):
+            for filename in filenames:
+                path = os.path.join(root, filename)
+                modification_time = os.path.getmtime(path)
+                if current_time - modification_time > self._days_alive_secs:
+                    os.remove(path)
+
+    def _max_size_cleanup(self, thumbnails_dir):
+        files_info = self.get_thumbnails_dir_file_info()
+        size = self.get_thumbnails_dir_size(files_info)
+        if size < self.max_filesize:
+            return
+
+        sorted_file_info = collections.deque(
+            sorted(files_info, key=lambda item: item.modification_time)
+        )
+        diff = size - self.max_filesize
+        while diff > 0:
+            if not sorted_file_info:
+                break
+
+            file_info = sorted_file_info.popleft()
+            diff -= file_info.size
+            os.remove(file_info.path)
+
+    def get_thumbnail_filepath(self, project_name, thumbnail_id):
+        """Get thumbnail by thumbnail id.
+
+        Args:
+            project_name (str): Name of project.
+            thumbnail_id (str): Thumbnail id.
+
+        Returns:
+            Union[str, None]: Path to thumbnail image or None if thumbnail
+                is not cached yet.
+ """ + + if not thumbnail_id: + return None + + for ext in ( + ".png", + ".jpeg", + ): + filepath = os.path.join( + self.thumbnails_dir, project_name, thumbnail_id + ext + ) + if os.path.exists(filepath): + return filepath + return None + + def get_project_dir(self, project_name): + """Path to root directory for specific project. + + Args: + project_name (str): Name of project for which root directory path + should be returned. + + Returns: + str: Path to root of project's thumbnails. + """ + + return os.path.join(self.thumbnails_dir, project_name) + + def make_sure_project_dir_exists(self, project_name): + project_dir = self.get_project_dir(project_name) + if not os.path.exists(project_dir): + os.makedirs(project_dir) + return project_dir + + def store_thumbnail(self, project_name, thumbnail_id, content, mime_type): + """Store thumbnail to cache folder. + + Args: + project_name (str): Project where the thumbnail belong to. + thumbnail_id (str): Thumbnail id. + content (bytes): Byte content of thumbnail file. + mime_type (str): Type of content. + + Returns: + str: Path to cached thumbnail image file. + """ + + if mime_type == "image/png": + ext = ".png" + elif mime_type == "image/jpeg": + ext = ".jpeg" + else: + raise ValueError( + "Unknown mime type for thumbnail \"{}\"".format(mime_type)) + + project_dir = self.make_sure_project_dir_exists(project_name) + thumbnail_path = os.path.join(project_dir, thumbnail_id + ext) + with open(thumbnail_path, "wb") as stream: + stream.write(content) + + current_time = time.time() + os.utime(thumbnail_path, (current_time, current_time)) + + return thumbnail_path + + +class _CacheItems: + thumbnails_cache = ThumbnailsCache() + + +def get_thumbnail_path(project_name, thumbnail_id): + """Get path to thumbnail image. + + Args: + project_name (str): Project where thumbnail belongs to. + thumbnail_id (Union[str, None]): Thumbnail id. + + Returns: + Union[str, None]: Path to thumbnail image or None if thumbnail + id is not valid or thumbnail was not possible to receive. + + """ + if not thumbnail_id: + return None + + filepath = _CacheItems.thumbnails_cache.get_thumbnail_filepath( + project_name, thumbnail_id + ) + if filepath is not None: + return filepath + + # 'ayon_api' had a bug, public function + # 'get_thumbnail_by_id' did not return output of + # 'ServerAPI' method. 
+ con = ayon_api.get_server_api_connection() + result = con.get_thumbnail_by_id(project_name, thumbnail_id) + + if result is not None and result.is_valid: + return _CacheItems.thumbnails_cache.store_thumbnail( + project_name, + thumbnail_id, + result.content, + result.content_type + ) + return None diff --git a/client/ayon_core/pipeline/workfile/__init__.py b/client/ayon_core/pipeline/workfile/__init__.py index 36766e3a04..05f939024c 100644 --- a/client/ayon_core/pipeline/workfile/__init__.py +++ b/client/ayon_core/pipeline/workfile/__init__.py @@ -21,6 +21,15 @@ from .utils import ( from .build_workfile import BuildWorkfile +from .workfile_template_builder import ( + discover_workfile_build_plugins, + register_workfile_build_plugin, + deregister_workfile_build_plugin, + register_workfile_build_plugin_path, + deregister_workfile_build_plugin_path, +) + + __all__ = ( "get_workfile_template_key_from_context", "get_workfile_template_key", @@ -39,4 +48,10 @@ __all__ = ( "should_open_workfiles_tool_on_launch", "BuildWorkfile", + + "discover_workfile_build_plugins", + "register_workfile_build_plugin", + "deregister_workfile_build_plugin", + "register_workfile_build_plugin_path", + "deregister_workfile_build_plugin_path", ) diff --git a/client/ayon_core/pipeline/workfile/workfile_template_builder.py b/client/ayon_core/pipeline/workfile/workfile_template_builder.py index 8082adc65d..bb94d87483 100644 --- a/client/ayon_core/pipeline/workfile/workfile_template_builder.py +++ b/client/ayon_core/pipeline/workfile/workfile_template_builder.py @@ -36,6 +36,7 @@ from ayon_core.lib import ( filter_profiles, attribute_definitions, ) +from ayon_core.lib.events import EventSystem, EventCallback, Event from ayon_core.lib.attribute_definitions import get_attributes_keys from ayon_core.pipeline import Anatomy from ayon_core.pipeline.load import ( @@ -43,6 +44,13 @@ from ayon_core.pipeline.load import ( get_representation_contexts, load_with_repre_context, ) +from ayon_core.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from ayon_core.pipeline.create import ( discover_legacy_creator_plugins, @@ -124,6 +132,8 @@ class AbstractTemplateBuilder(object): self._current_task_entity = _NOT_SET self._linked_folder_entities = _NOT_SET + self._event_system = EventSystem() + @property def project_name(self): if isinstance(self._host, HostBase): @@ -211,10 +221,14 @@ class AbstractTemplateBuilder(object): Returns: List[PlaceholderPlugin]: Plugin classes available for host. 
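+                Plugins from the host's deprecated
+                'get_workfile_build_placeholder_plugins' hook are used
+                exclusively when the hook is implemented.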
""" + plugins = [] + # Backwards compatibility if hasattr(self._host, "get_workfile_build_placeholder_plugins"): return self._host.get_workfile_build_placeholder_plugins() - return [] + + plugins.extend(discover(PlaceholderPlugin)) + return plugins @property def host(self): @@ -257,6 +271,8 @@ class AbstractTemplateBuilder(object): self._project_settings = None + self._event_system = EventSystem() + self.clear_shared_data() self.clear_shared_populate_data() @@ -498,15 +514,21 @@ class AbstractTemplateBuilder(object): process if version is created """ - template_preset = self.get_template_preset() - - if template_path is None: - template_path = template_preset["path"] - - if keep_placeholders is None: - keep_placeholders = template_preset["keep_placeholder"] - if create_first_version is None: - create_first_version = template_preset["create_first_version"] + if any( + value is None + for value in [ + template_path, + keep_placeholders, + create_first_version, + ] + ): + template_preset = self.get_template_preset() + if template_path is None: + template_path = template_preset["path"] + if keep_placeholders is None: + keep_placeholders = template_preset["keep_placeholder"] + if create_first_version is None: + create_first_version = template_preset["create_first_version"] # check if first version is created created_version_workfile = False @@ -729,6 +751,16 @@ class AbstractTemplateBuilder(object): placeholder.set_finished() + # Trigger on_depth_processed event + self.emit_event( + topic="template.depth_processed", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id + }, + source="builder" + ) + # Clear shared data before getting new placeholders self.clear_shared_populate_data() @@ -747,6 +779,16 @@ class AbstractTemplateBuilder(object): placeholder_by_scene_id[identifier] = placeholder placeholders.append(placeholder) + # Trigger on_finished event + self.emit_event( + topic="template.finished", + data={ + "depth": iter_counter, + "placeholders_by_scene_id": placeholder_by_scene_id, + }, + source="builder" + ) + self.refresh() def _get_build_profiles(self): @@ -772,12 +814,14 @@ class AbstractTemplateBuilder(object): - 'project_settings/{host name}/templated_workfile_build/profiles' Returns: - str: Path to a template file with placeholders. + dict: Dictionary with `path`, `keep_placeholder` and + `create_first_version` settings from the template preset + for current context. Raises: TemplateProfileNotFound: When profiles are not filled. TemplateLoadFailed: Profile was found but path is not set. - TemplateNotFound: Path was set but file does not exists. + TemplateNotFound: Path was set but file does not exist. 
""" host_name = self.host_name @@ -872,6 +916,30 @@ class AbstractTemplateBuilder(object): "create_first_version": create_first_version } + def emit_event(self, topic, data=None, source=None) -> Event: + return self._event_system.emit(topic, data, source) + + def add_event_callback(self, topic, callback, order=None): + return self._event_system.add_callback(topic, callback, order=order) + + def add_on_finished_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.finished", + callback=callback, + order=order + ) + + def add_on_depth_processed_callback( + self, callback, order=None + ) -> EventCallback: + return self.add_event_callback( + topic="template.depth_processed", + callback=callback, + order=order + ) + @six.add_metaclass(ABCMeta) class PlaceholderPlugin(object): @@ -1904,3 +1972,23 @@ class CreatePlaceholderItem(PlaceholderItem): def create_failed(self, creator_data): self._failed_created_publish_instances.append(creator_data) + + +def discover_workfile_build_plugins(*args, **kwargs): + return discover(PlaceholderPlugin, *args, **kwargs) + + +def register_workfile_build_plugin(plugin: PlaceholderPlugin): + register_plugin(PlaceholderPlugin, plugin) + + +def deregister_workfile_build_plugin(plugin: PlaceholderPlugin): + deregister_plugin(PlaceholderPlugin, plugin) + + +def register_workfile_build_plugin_path(path: str): + register_plugin_path(PlaceholderPlugin, path) + + +def deregister_workfile_build_plugin_path(path: str): + deregister_plugin_path(PlaceholderPlugin, path) diff --git a/client/ayon_core/plugins/publish/integrate.py b/client/ayon_core/plugins/publish/integrate.py index ce34f2e88b..9ae96e1a20 100644 --- a/client/ayon_core/plugins/publish/integrate.py +++ b/client/ayon_core/plugins/publish/integrate.py @@ -42,7 +42,7 @@ def prepare_changes(old_entity, new_entity): Returns: dict[str, Any]: Changes that have new entity. - + """ changes = {} for key in set(new_entity.keys()): @@ -121,6 +121,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "setdress", "layout", "ass", + "assProxy", "vdbcache", "scene", "vrayproxy", @@ -167,7 +168,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "uasset", "blendScene", "yeticacheUE", - "tycache" + "tycache", + "csv_ingest_file", ] default_template_name = "publish" diff --git a/client/ayon_core/resources/app_icons/3de4.png b/client/ayon_core/resources/app_icons/3de4.png new file mode 100644 index 0000000000..bd0fe40d37 Binary files /dev/null and b/client/ayon_core/resources/app_icons/3de4.png differ diff --git a/client/ayon_core/tools/adobe_webserver/app.py b/client/ayon_core/tools/adobe_webserver/app.py index 7d97d7d66d..26bf638c91 100644 --- a/client/ayon_core/tools/adobe_webserver/app.py +++ b/client/ayon_core/tools/adobe_webserver/app.py @@ -104,14 +104,11 @@ class WebServerTool: again. In that case, use existing running webserver. Check here is easier than capturing exception from thread. 
""" - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - result = True - try: - sock.bind((host_name, port)) - result = False - except: - print("Port is in use") + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con: + result = con.connect_ex((host_name, port)) == 0 + if result: + print(f"Port {port} is already in use") return result def call(self, func): diff --git a/client/ayon_core/tools/common_models/cache.py b/client/ayon_core/tools/common_models/cache.py index 221a14160c..59b727728f 100644 --- a/client/ayon_core/tools/common_models/cache.py +++ b/client/ayon_core/tools/common_models/cache.py @@ -1,239 +1,31 @@ -import time -import collections +import warnings -InitInfo = collections.namedtuple( - "InitInfo", - ["default_factory", "lifetime"] +from ayon_core.lib import CacheItem as _CacheItem +from ayon_core.lib import NestedCacheItem as _NestedCacheItem + + +# Cache classes were moved to `ayon_core.lib.cache` +class CacheItem(_CacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'CacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +class NestedCacheItem(_NestedCacheItem): + def __init__(self, *args, **kwargs): + warnings.warn( + "Used 'NestedCacheItem' from deprecated location " + "'ayon_core.tools.common_models', use 'ayon_core.lib' instead.", + DeprecationWarning, + ) + super().__init__(*args, **kwargs) + + +__all__ = ( + "CacheItem", + "NestedCacheItem", ) - - -def _default_factory_func(): - return None - - -class CacheItem: - """Simple cache item with lifetime and default value. - - Args: - default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - """ - - def __init__(self, default_factory=None, lifetime=None): - if lifetime is None: - lifetime = 120 - self._lifetime = lifetime - self._last_update = None - if default_factory is None: - default_factory = _default_factory_func - self._default_factory = default_factory - self._data = default_factory() - - @property - def is_valid(self): - """Is cache valid to use. - - Return: - bool: True if cache is valid, False otherwise. - """ - - if self._last_update is None: - return False - - return (time.time() - self._last_update) < self._lifetime - - def set_lifetime(self, lifetime): - """Change lifetime of cache item. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._lifetime = lifetime - - def set_invalid(self): - """Set cache as invalid.""" - - self._last_update = None - - def reset(self): - """Set cache as invalid and reset data.""" - - self._last_update = None - self._data = self._default_factory() - - def get_data(self): - """Receive cached data. - - Returns: - Any: Any data that are cached. - """ - - return self._data - - def update_data(self, data): - self._data = data - self._last_update = time.time() - - -class NestedCacheItem: - """Helper for cached items stored in nested structure. - - Example: - >>> cache = NestedCacheItem(levels=2, default_factory=lambda: 0) - >>> cache["a"]["b"].is_valid - False - >>> cache["a"]["b"].get_data() - 0 - >>> cache["a"]["b"] = 1 - >>> cache["a"]["b"].is_valid - True - >>> cache["a"]["b"].get_data() - 1 - >>> cache.reset() - >>> cache["a"]["b"].is_valid - False - - Args: - levels (int): Number of nested levels where read cache is stored. 
- default_factory (Optional[callable]): Function that returns default - value used on init and on reset. - lifetime (Optional[int]): Lifetime of the cache data in seconds. - _init_info (Optional[InitInfo]): Private argument. Init info for - nested cache where created from parent item. - """ - - def __init__( - self, levels=1, default_factory=None, lifetime=None, _init_info=None - ): - if levels < 1: - raise ValueError("Nested levels must be greater than 0") - self._data_by_key = {} - if _init_info is None: - _init_info = InitInfo(default_factory, lifetime) - self._init_info = _init_info - self._levels = levels - - def __getitem__(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - cache = self._data_by_key.get(key) - if cache is None: - if self._levels > 1: - cache = NestedCacheItem( - levels=self._levels - 1, - _init_info=self._init_info - ) - else: - cache = CacheItem( - self._init_info.default_factory, - self._init_info.lifetime - ) - self._data_by_key[key] = cache - return cache - - def __setitem__(self, key, value): - """Update cached data. - - Args: - key (str): Key of the cache item. - value (Any): Any data that are cached. - """ - - if self._levels > 1: - raise AttributeError(( - "{} does not support '__setitem__'. Lower nested level by {}" - ).format(self.__class__.__name__, self._levels - 1)) - cache = self[key] - cache.update_data(value) - - def get(self, key): - """Get cached data. - - Args: - key (str): Key of the cache item. - - Returns: - Union[NestedCacheItem, CacheItem]: Cache item. - """ - - return self[key] - - def cached_count(self): - """Amount of cached items. - - Returns: - int: Amount of cached items. - """ - - return len(self._data_by_key) - - def clear_key(self, key): - """Clear cached item by key. - - Args: - key (str): Key of the cache item. - """ - - self._data_by_key.pop(key, None) - - def clear_invalid(self): - """Clear all invalid cache items. - - Note: - To clear all cache items use 'reset'. - """ - - changed = {} - children_are_nested = self._levels > 1 - for key, cache in tuple(self._data_by_key.items()): - if children_are_nested: - output = cache.clear_invalid() - if output: - changed[key] = output - if not cache.cached_count(): - self._data_by_key.pop(key) - elif not cache.is_valid: - changed[key] = cache.get_data() - self._data_by_key.pop(key) - return changed - - def reset(self): - """Reset cache. - - Note: - To clear only invalid cache items use 'clear_invalid'. - """ - - self._data_by_key = {} - - def set_lifetime(self, lifetime): - """Change lifetime of all children cache items. - - Args: - lifetime (int): Lifetime of the cache data in seconds. - """ - - self._init_info.lifetime = lifetime - for cache in self._data_by_key.values(): - cache.set_lifetime(lifetime) - - @property - def is_valid(self): - """Raise reasonable error when called on wront level. - - Raises: - AttributeError: If called on nested cache item. - """ - - raise AttributeError(( - "{} does not support 'is_valid'. 
Lower nested level by '{}'" - ).format(self.__class__.__name__, self._levels)) diff --git a/client/ayon_core/tools/common_models/hierarchy.py b/client/ayon_core/tools/common_models/hierarchy.py index d8b28f020d..78b8a7f492 100644 --- a/client/ayon_core/tools/common_models/hierarchy.py +++ b/client/ayon_core/tools/common_models/hierarchy.py @@ -6,8 +6,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import NestedCacheItem +from ayon_core.lib import NestedCacheItem HIERARCHY_MODEL_SENDER = "hierarchy.model" diff --git a/client/ayon_core/tools/common_models/projects.py b/client/ayon_core/tools/common_models/projects.py index e30561000e..19a38bee21 100644 --- a/client/ayon_core/tools/common_models/projects.py +++ b/client/ayon_core/tools/common_models/projects.py @@ -5,8 +5,7 @@ import ayon_api import six from ayon_core.style import get_default_entity_icon_color - -from .cache import CacheItem +from ayon_core.lib import CacheItem PROJECTS_MODEL_SENDER = "projects.model" diff --git a/client/ayon_core/tools/common_models/thumbnails.py b/client/ayon_core/tools/common_models/thumbnails.py index 1c3aadc49f..2fa1e36e5c 100644 --- a/client/ayon_core/tools/common_models/thumbnails.py +++ b/client/ayon_core/tools/common_models/thumbnails.py @@ -1,234 +1,15 @@ -import os -import time import collections import ayon_api -import appdirs -from .cache import NestedCacheItem - -FileInfo = collections.namedtuple( - "FileInfo", - ("path", "size", "modification_time") -) - - -class ThumbnailsCache: - """Cache of thumbnails on local storage. - - Thumbnails are cached to appdirs to predefined directory. Each project has - own subfolder with thumbnails -> that's because each project has own - thumbnail id validation and file names are thumbnail ids with matching - extension. Extensions are predefined (.png and .jpeg). - - Cache has cleanup mechanism which is triggered on initialized by default. - - The cleanup has 2 levels: - 1. soft cleanup which remove all files that are older then 'days_alive' - 2. max size cleanup which remove all files until the thumbnails folder - contains less then 'max_filesize' - - this is time consuming so it's not triggered automatically - - Args: - cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails). - """ - - # Lifetime of thumbnails (in seconds) - # - default 3 days - days_alive = 3 - # Max size of thumbnail directory (in bytes) - # - default 2 Gb - max_filesize = 2 * 1024 * 1024 * 1024 - - def __init__(self, cleanup=True): - self._thumbnails_dir = None - self._days_alive_secs = self.days_alive * 24 * 60 * 60 - if cleanup: - self.cleanup() - - def get_thumbnails_dir(self): - """Root directory where thumbnails are stored. - - Returns: - str: Path to thumbnails root. - """ - - if self._thumbnails_dir is None: - # TODO use generic function - directory = appdirs.user_data_dir("AYON", "Ynput") - self._thumbnails_dir = os.path.join(directory, "thumbnails") - return self._thumbnails_dir - - thumbnails_dir = property(get_thumbnails_dir) - - def get_thumbnails_dir_file_info(self): - """Get information about all files in thumbnails directory. - - Returns: - List[FileInfo]: List of file information about all files. 
- """ - - thumbnails_dir = self.thumbnails_dir - files_info = [] - if not os.path.exists(thumbnails_dir): - return files_info - - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - files_info.append(FileInfo( - path, os.path.getsize(path), os.path.getmtime(path) - )) - return files_info - - def get_thumbnails_dir_size(self, files_info=None): - """Got full size of thumbnail directory. - - Args: - files_info (List[FileInfo]): Prepared file information about - files in thumbnail directory. - - Returns: - int: File size of all files in thumbnail directory. - """ - - if files_info is None: - files_info = self.get_thumbnails_dir_file_info() - - if not files_info: - return 0 - - return sum( - file_info.size - for file_info in files_info - ) - - def cleanup(self, check_max_size=False): - """Cleanup thumbnails directory. - - Args: - check_max_size (bool): Also cleanup files to match max size of - thumbnails directory. - """ - - thumbnails_dir = self.get_thumbnails_dir() - # Skip if thumbnails dir does not exist yet - if not os.path.exists(thumbnails_dir): - return - - self._soft_cleanup(thumbnails_dir) - if check_max_size: - self._max_size_cleanup(thumbnails_dir) - - def _soft_cleanup(self, thumbnails_dir): - current_time = time.time() - for root, _, filenames in os.walk(thumbnails_dir): - for filename in filenames: - path = os.path.join(root, filename) - modification_time = os.path.getmtime(path) - if current_time - modification_time > self._days_alive_secs: - os.remove(path) - - def _max_size_cleanup(self, thumbnails_dir): - files_info = self.get_thumbnails_dir_file_info() - size = self.get_thumbnails_dir_size(files_info) - if size < self.max_filesize: - return - - sorted_file_info = collections.deque( - sorted(files_info, key=lambda item: item.modification_time) - ) - diff = size - self.max_filesize - while diff > 0: - if not sorted_file_info: - break - - file_info = sorted_file_info.popleft() - diff -= file_info.size - os.remove(file_info.path) - - def get_thumbnail_filepath(self, project_name, thumbnail_id): - """Get thumbnail by thumbnail id. - - Args: - project_name (str): Name of project. - thumbnail_id (str): Thumbnail id. - - Returns: - Union[str, None]: Path to thumbnail image or None if thumbnail - is not cached yet. - """ - - if not thumbnail_id: - return None - - for ext in ( - ".png", - ".jpeg", - ): - filepath = os.path.join( - self.thumbnails_dir, project_name, thumbnail_id + ext - ) - if os.path.exists(filepath): - return filepath - return None - - def get_project_dir(self, project_name): - """Path to root directory for specific project. - - Args: - project_name (str): Name of project for which root directory path - should be returned. - - Returns: - str: Path to root of project's thumbnails. - """ - - return os.path.join(self.thumbnails_dir, project_name) - - def make_sure_project_dir_exists(self, project_name): - project_dir = self.get_project_dir(project_name) - if not os.path.exists(project_dir): - os.makedirs(project_dir) - return project_dir - - def store_thumbnail(self, project_name, thumbnail_id, content, mime_type): - """Store thumbnail to cache folder. - - Args: - project_name (str): Project where the thumbnail belong to. - thumbnail_id (str): Id of thumbnail. - content (bytes): Byte content of thumbnail file. - mime_data (str): Type of content. - - Returns: - str: Path to cached thumbnail image file. 
- """ - - if mime_type == "image/png": - ext = ".png" - elif mime_type == "image/jpeg": - ext = ".jpeg" - else: - raise ValueError( - "Unknown mime type for thumbnail \"{}\"".format(mime_type)) - - project_dir = self.make_sure_project_dir_exists(project_name) - thumbnail_path = os.path.join(project_dir, thumbnail_id + ext) - with open(thumbnail_path, "wb") as stream: - stream.write(content) - - current_time = time.time() - os.utime(thumbnail_path, (current_time, current_time)) - - return thumbnail_path +from ayon_core.lib import NestedCacheItem +from ayon_core.pipeline.thumbnails import get_thumbnail_path class ThumbnailsModel: entity_cache_lifetime = 240 # In seconds def __init__(self): - self._thumbnail_cache = ThumbnailsCache() self._paths_cache = collections.defaultdict(dict) self._folders_cache = NestedCacheItem( levels=2, lifetime=self.entity_cache_lifetime) @@ -283,28 +64,7 @@ class ThumbnailsModel: if thumbnail_id in project_cache: return project_cache[thumbnail_id] - filepath = self._thumbnail_cache.get_thumbnail_filepath( - project_name, thumbnail_id - ) - if filepath is not None: - project_cache[thumbnail_id] = filepath - return filepath - - # 'ayon_api' had a bug, public function - # 'get_thumbnail_by_id' did not return output of - # 'ServerAPI' method. - con = ayon_api.get_server_api_connection() - result = con.get_thumbnail_by_id(project_name, thumbnail_id) - if result is None: - pass - - elif result.is_valid: - filepath = self._thumbnail_cache.store_thumbnail( - project_name, - thumbnail_id, - result.content, - result.content_type - ) + filepath = get_thumbnail_path(project_name, thumbnail_id) project_cache[thumbnail_id] = filepath return filepath diff --git a/client/ayon_core/tools/loader/models/actions.py b/client/ayon_core/tools/loader/models/actions.py index ad2993af50..cfe91cadab 100644 --- a/client/ayon_core/tools/loader/models/actions.py +++ b/client/ayon_core/tools/loader/models/actions.py @@ -6,6 +6,7 @@ import uuid import ayon_api +from ayon_core.lib import NestedCacheItem from ayon_core.pipeline.load import ( discover_loader_plugins, ProductLoaderPlugin, @@ -17,7 +18,6 @@ from ayon_core.pipeline.load import ( LoadError, IncompatibleLoaderError, ) -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ActionItem ACTIONS_MODEL_SENDER = "actions.model" diff --git a/client/ayon_core/tools/loader/models/products.py b/client/ayon_core/tools/loader/models/products.py index 812446a012..a3bbc30a09 100644 --- a/client/ayon_core/tools/loader/models/products.py +++ b/client/ayon_core/tools/loader/models/products.py @@ -5,8 +5,8 @@ import arrow import ayon_api from ayon_api.operations import OperationsSession +from ayon_core.lib import NestedCacheItem from ayon_core.style import get_default_entity_icon_color -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ( ProductTypeItem, ProductItem, diff --git a/client/ayon_core/tools/loader/models/sitesync.py b/client/ayon_core/tools/loader/models/sitesync.py index 987510905b..02504c2ad3 100644 --- a/client/ayon_core/tools/loader/models/sitesync.py +++ b/client/ayon_core/tools/loader/models/sitesync.py @@ -2,9 +2,8 @@ import collections from ayon_api import get_representations, get_versions_links -from ayon_core.lib import Logger +from ayon_core.lib import Logger, NestedCacheItem from ayon_core.addon import AddonsManager -from ayon_core.tools.common_models import NestedCacheItem from ayon_core.tools.loader.abstract import ActionItem 
DOWNLOAD_IDENTIFIER = "sitesync.download"
diff --git a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
index 47c5399cf7..4e34f9b58c 100644
--- a/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
+++ b/client/ayon_core/tools/publisher/widgets/card_view_widgets.py
@@ -52,6 +52,7 @@ class SelectionTypes:
 class BaseGroupWidget(QtWidgets.QWidget):
    selected = QtCore.Signal(str, str, str)
    removed_selected = QtCore.Signal()
+    double_clicked = QtCore.Signal()
 
    def __init__(self, group_name, parent):
        super(BaseGroupWidget, self).__init__(parent)
@@ -192,6 +193,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget):
            else:
                widget = ConvertorItemCardWidget(item, self)
                widget.selected.connect(self._on_widget_selection)
+                widget.double_clicked.connect(self.double_clicked)
            self._widgets_by_id[item.id] = widget
            self._content_layout.insertWidget(widget_idx, widget)
            widget_idx += 1
@@ -254,6 +256,7 @@ class InstanceGroupWidget(BaseGroupWidget):
            )
            widget.selected.connect(self._on_widget_selection)
            widget.active_changed.connect(self._on_active_changed)
+            widget.double_clicked.connect(self.double_clicked)
            self._widgets_by_id[instance.id] = widget
            self._content_layout.insertWidget(widget_idx, widget)
            widget_idx += 1
@@ -271,6 +274,7 @@ class CardWidget(BaseClickableFrame):
    # Group identifier of card
    # - this must be set because if send when mouse is released with card id
    _group_identifier = None
+    double_clicked = QtCore.Signal()
 
    def __init__(self, parent):
        super(CardWidget, self).__init__(parent)
@@ -279,6 +283,11 @@
        self._selected = False
        self._id = None
 
+    def mouseDoubleClickEvent(self, event):
+        super(CardWidget, self).mouseDoubleClickEvent(event)
+        if self._is_valid_double_click(event):
+            self.double_clicked.emit()
+
    @property
    def id(self):
        """Id of card."""
@@ -312,6 +321,9 @@
        self.selected.emit(self._id, self._group_identifier, selection_type)
 
+    def _is_valid_double_click(self, event):
+        return True
+
 
 class ContextCardWidget(CardWidget):
    """Card for global context.
@@ -527,6 +539,15 @@ class InstanceCardWidget(CardWidget):
    def _on_expend_clicked(self):
        self._set_expanded()
 
+    def _is_valid_double_click(self, event):
+        widget = self.childAt(event.pos())
+        if (
+            widget is self._active_checkbox
+            or widget is self._expand_btn
+        ):
+            return False
+        return True
+
 
 class InstanceCardView(AbstractInstanceView):
    """Publish access to card view.
 
    Wrapper of all widgets in card view.
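+    Emits 'double_clicked' when a card body is double-clicked, which the
+    publisher window uses to switch to the publish tab.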
""" + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceCardView, self).__init__(parent) @@ -715,6 +738,7 @@ class InstanceCardView(AbstractInstanceView): ) group_widget.active_changed.connect(self._on_active_changed) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(widget_idx, group_widget) self._widgets_by_group[group_name] = group_widget @@ -755,6 +779,7 @@ class InstanceCardView(AbstractInstanceView): widget = ContextCardWidget(self._content_widget) widget.selected.connect(self._on_widget_selection) + widget.double_clicked.connect(self.double_clicked) self._context_widget = widget @@ -778,6 +803,7 @@ class InstanceCardView(AbstractInstanceView): CONVERTOR_ITEM_GROUP, self._content_widget ) group_widget.selected.connect(self._on_widget_selection) + group_widget.double_clicked.connect(self.double_clicked) self._content_layout.insertWidget(1, group_widget) self._convertor_items_group = group_widget diff --git a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py index 3322a73be6..71be0ab1a4 100644 --- a/client/ayon_core/tools/publisher/widgets/list_view_widgets.py +++ b/client/ayon_core/tools/publisher/widgets/list_view_widgets.py @@ -110,6 +110,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): This is required to be able use custom checkbox on custom place. """ active_changed = QtCore.Signal(str, bool) + double_clicked = QtCore.Signal() def __init__(self, instance, parent): super(InstanceListItemWidget, self).__init__(parent) @@ -149,6 +150,12 @@ class InstanceListItemWidget(QtWidgets.QWidget): self._set_valid_property(instance.has_valid_context) + def mouseDoubleClickEvent(self, event): + widget = self.childAt(event.pos()) + super(InstanceListItemWidget, self).mouseDoubleClickEvent(event) + if widget is not self._active_checkbox: + self.double_clicked.emit() + def _set_valid_property(self, valid): if self._has_valid_context == valid: return @@ -209,6 +216,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): class ListContextWidget(QtWidgets.QFrame): """Context (or global attributes) widget.""" + double_clicked = QtCore.Signal() + def __init__(self, parent): super(ListContextWidget, self).__init__(parent) @@ -225,6 +234,10 @@ class ListContextWidget(QtWidgets.QFrame): self.label_widget = label_widget + def mouseDoubleClickEvent(self, event): + super(ListContextWidget, self).mouseDoubleClickEvent(event) + self.double_clicked.emit() + class InstanceListGroupWidget(QtWidgets.QFrame): """Widget representing group of instances. @@ -317,6 +330,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame): class InstanceTreeView(QtWidgets.QTreeView): """View showing instances and their groups.""" toggle_requested = QtCore.Signal(int) + double_clicked = QtCore.Signal() def __init__(self, *args, **kwargs): super(InstanceTreeView, self).__init__(*args, **kwargs) @@ -425,6 +439,9 @@ class InstanceListView(AbstractInstanceView): This is public access to and from list view. 
""" + + double_clicked = QtCore.Signal() + def __init__(self, controller, parent): super(InstanceListView, self).__init__(parent) @@ -454,6 +471,7 @@ class InstanceListView(AbstractInstanceView): instance_view.collapsed.connect(self._on_collapse) instance_view.expanded.connect(self._on_expand) instance_view.toggle_requested.connect(self._on_toggle_request) + instance_view.double_clicked.connect(self.double_clicked) self._group_items = {} self._group_widgets = {} @@ -687,6 +705,7 @@ class InstanceListView(AbstractInstanceView): self._active_toggle_enabled ) widget.active_changed.connect(self._on_active_changed) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._widgets_by_id[instance.id] = widget @@ -717,6 +736,7 @@ class InstanceListView(AbstractInstanceView): ) proxy_index = self._proxy_model.mapFromSource(index) widget = ListContextWidget(self._instance_view) + widget.double_clicked.connect(self.double_clicked) self._instance_view.setIndexWidget(proxy_index, widget) self._context_widget = widget diff --git a/client/ayon_core/tools/publisher/widgets/overview_widget.py b/client/ayon_core/tools/publisher/widgets/overview_widget.py index dd82185830..cedf52ae01 100644 --- a/client/ayon_core/tools/publisher/widgets/overview_widget.py +++ b/client/ayon_core/tools/publisher/widgets/overview_widget.py @@ -18,6 +18,7 @@ class OverviewWidget(QtWidgets.QFrame): instance_context_changed = QtCore.Signal() create_requested = QtCore.Signal() convert_requested = QtCore.Signal() + publish_tab_requested = QtCore.Signal() anim_end_value = 200 anim_duration = 200 @@ -113,9 +114,15 @@ class OverviewWidget(QtWidgets.QFrame): product_list_view.selection_changed.connect( self._on_product_change ) + product_list_view.double_clicked.connect( + self.publish_tab_requested + ) product_view_cards.selection_changed.connect( self._on_product_change ) + product_view_cards.double_clicked.connect( + self.publish_tab_requested + ) # Active instances changed product_list_view.active_changed.connect( self._on_active_changed diff --git a/client/ayon_core/tools/publisher/window.py b/client/ayon_core/tools/publisher/window.py index 123864ff6c..1b13ced317 100644 --- a/client/ayon_core/tools/publisher/window.py +++ b/client/ayon_core/tools/publisher/window.py @@ -258,6 +258,9 @@ class PublisherWindow(QtWidgets.QDialog): overview_widget.convert_requested.connect( self._on_convert_requested ) + overview_widget.publish_tab_requested.connect( + self._go_to_publish_tab + ) save_btn.clicked.connect(self._on_save_clicked) reset_btn.clicked.connect(self._on_reset_clicked) diff --git a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py index 6a1572deb2..fe6abee951 100644 --- a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py +++ b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py @@ -20,6 +20,8 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): controller (AbstractWorkfilesFrontend): The control object. 
""" + refreshed = QtCore.Signal() + def __init__(self, controller): super(WorkAreaFilesModel, self).__init__() @@ -163,6 +165,12 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): self._fill_items() def _fill_items(self): + try: + self._fill_items_impl() + finally: + self.refreshed.emit() + + def _fill_items_impl(self): folder_id = self._selected_folder_id task_id = self._selected_task_id if not folder_id or not task_id: @@ -285,6 +293,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): selection_model.selectionChanged.connect(self._on_selection_change) view.double_clicked.connect(self._on_mouse_double_click) view.customContextMenuRequested.connect(self._on_context_menu) + model.refreshed.connect(self._on_model_refresh) controller.register_event_callback( "expected_selection_changed", @@ -298,6 +307,7 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): self._controller = controller self._published_mode = False + self._change_selection_on_refresh = True def set_published_mode(self, published_mode): """Set the published mode. @@ -379,7 +389,9 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): if not workfile_info["current"]: return + self._change_selection_on_refresh = False self._model.refresh() + self._change_selection_on_refresh = True workfile_name = workfile_info["name"] if ( @@ -394,3 +406,30 @@ class WorkAreaFilesWidget(QtWidgets.QWidget): self._controller.expected_workfile_selected( event["folder"]["id"], event["task"]["name"], workfile_name ) + + def _on_model_refresh(self): + if ( + not self._change_selection_on_refresh + or self._proxy_model.rowCount() < 1 + ): + return + + # Find the row with latest date modified + latest_index = max( + ( + self._proxy_model.index(idx, 0) + for idx in range(self._proxy_model.rowCount()) + ), + key=lambda model_index: model_index.data(DATE_MODIFIED_ROLE) + ) + + # Select row of latest modified + selection_model = self._view.selectionModel() + selection_model.select( + latest_index, + ( + QtCore.QItemSelectionModel.ClearAndSelect + | QtCore.QItemSelectionModel.Current + | QtCore.QItemSelectionModel.Rows + ) + ) diff --git a/client/ayon_core/tools/workfiles/widgets/window.py b/client/ayon_core/tools/workfiles/widgets/window.py index 8a2617d270..1cfae7ec90 100644 --- a/client/ayon_core/tools/workfiles/widgets/window.py +++ b/client/ayon_core/tools/workfiles/widgets/window.py @@ -118,11 +118,11 @@ class WorkfilesToolWindow(QtWidgets.QWidget): overlay_invalid_host = InvalidHostOverlay(self) overlay_invalid_host.setVisible(False) - first_show_timer = QtCore.QTimer() - first_show_timer.setSingleShot(True) - first_show_timer.setInterval(50) + show_timer = QtCore.QTimer() + show_timer.setSingleShot(True) + show_timer.setInterval(50) - first_show_timer.timeout.connect(self._on_first_show) + show_timer.timeout.connect(self._on_show) controller.register_event_callback( "save_as.finished", @@ -159,7 +159,7 @@ class WorkfilesToolWindow(QtWidgets.QWidget): self._tasks_widget = tasks_widget self._side_panel = side_panel - self._first_show_timer = first_show_timer + self._show_timer = show_timer self._post_init() @@ -287,9 +287,9 @@ class WorkfilesToolWindow(QtWidgets.QWidget): def showEvent(self, event): super(WorkfilesToolWindow, self).showEvent(event) + self._show_timer.start() if self._first_show: self._first_show = False - self._first_show_timer.start() self.setStyleSheet(style.load_stylesheet()) def keyPressEvent(self, event): @@ -303,9 +303,8 @@ class WorkfilesToolWindow(QtWidgets.QWidget): pass - def _on_first_show(self): - if not self._controller_refreshed: 
- self.refresh() + def _on_show(self): + self.refresh() def _on_file_text_filter_change(self, text): self._files_widget.set_text_filter(text) diff --git a/pyproject.toml b/pyproject.toml index c1f6ddfb0b..4726bef41a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -106,7 +106,7 @@ line-ending = "auto" [tool.codespell] # Ignore words that are not in the dictionary. -ignore-words-list = "ayon,ynput,parms,parm,hda,developpement" +ignore-words-list = "ayon,ynput,parms,parm,hda,developpement,ue" skip = "./.*,./package/*,*/vendor/*,*/unreal/integration/*,*/aftereffects/api/extension/js/libs/*" count = true diff --git a/server_addon/aftereffects/package.py b/server_addon/aftereffects/package.py new file mode 100644 index 0000000000..a680b37602 --- /dev/null +++ b/server_addon/aftereffects/package.py @@ -0,0 +1,3 @@ +name = "aftereffects" +title = "AfterEffects" +version = "0.1.3" diff --git a/server_addon/aftereffects/server/__init__.py b/server_addon/aftereffects/server/__init__.py index e14e76e9db..76e6d5b2eb 100644 --- a/server_addon/aftereffects/server/__init__.py +++ b/server_addon/aftereffects/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import AfterEffectsSettings, DEFAULT_AFTEREFFECTS_SETTING -from .version import __version__ class AfterEffects(BaseServerAddon): - name = "aftereffects" - title = "AfterEffects" - version = __version__ - settings_model = AfterEffectsSettings async def get_default_settings(self): diff --git a/server_addon/aftereffects/server/version.py b/server_addon/aftereffects/server/version.py deleted file mode 100644 index e57ad00718..0000000000 --- a/server_addon/aftereffects/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.3" diff --git a/server_addon/applications/client/ayon_applications/addon.py b/server_addon/applications/client/ayon_applications/addon.py index 0f1b68af0e..a8eaa46cad 100644 --- a/server_addon/applications/client/ayon_applications/addon.py +++ b/server_addon/applications/client/ayon_applications/addon.py @@ -110,6 +110,26 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): ] } + def launch_application( + self, app_name, project_name, folder_path, task_name + ): + """Launch application. + + Args: + app_name (str): Full application name e.g. 'maya/2024'. + project_name (str): Project name. + folder_path (str): Folder path. + task_name (str): Task name. + + """ + app_manager = self.get_applications_manager() + return app_manager.launch( + app_name, + project_name=project_name, + folder_path=folder_path, + task_name=task_name, + ) + # --- CLI --- def cli(self, addon_click_group): main_group = click_wrap.group( @@ -134,6 +154,17 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): default=None ) ) + ( + main_group.command( + self._cli_launch_applications, + name="launch", + help="Launch application" + ) + .option("--app", required=True, help="Application name") + .option("--project", required=True, help="Project name") + .option("--folder", required=True, help="Folder path") + .option("--task", required=True, help="Task name") + ) # Convert main command to click object and add it to parent group addon_click_group.add_command( main_group.to_click_obj() @@ -171,3 +202,15 @@ class ApplicationsAddon(AYONAddon, IPluginPaths): with open(output_json_path, "w") as file_stream: json.dump(env, file_stream, indent=4) + + def _cli_launch_applications(self, project, folder, task, app): + """Launch application. 
+ + Args: + project (str): Project name. + folder (str): Folder path. + task (str): Task name. + app (str): Full application name e.g. 'maya/2024'. + + """ + self.launch_application(app, project, folder, task) diff --git a/server_addon/applications/client/ayon_applications/utils.py b/server_addon/applications/client/ayon_applications/utils.py index 234fa6c683..185779a949 100644 --- a/server_addon/applications/client/ayon_applications/utils.py +++ b/server_addon/applications/client/ayon_applications/utils.py @@ -281,13 +281,20 @@ def prepare_app_environments( app.environment ] + task_entity = data.get("task_entity") folder_entity = data.get("folder_entity") # Add tools environments groups_by_name = {} tool_by_group_name = collections.defaultdict(dict) - if folder_entity: - # Make sure each tool group can be added only once - for key in folder_entity["attrib"].get("tools") or []: + tools = None + if task_entity: + tools = task_entity["attrib"].get("tools") + + if tools is None and folder_entity: + tools = folder_entity["attrib"].get("tools") + + if tools: + for key in tools: tool = app.manager.tools.get(key) if not tool or not tool.is_valid_for_app(app): continue diff --git a/server_addon/applications/package.py b/server_addon/applications/package.py index ce312ed662..983749355e 100644 --- a/server_addon/applications/package.py +++ b/server_addon/applications/package.py @@ -1,3 +1,10 @@ name = "applications" title = "Applications" -version = "0.2.0" +version = "0.2.2" + +ayon_server_version = ">=1.0.7" +ayon_launcher_version = ">=1.0.2" +ayon_required_addons = { + "core": ">0.3.0", +} +ayon_compatible_addons = {} diff --git a/server_addon/applications/server/applications.json b/server_addon/applications/server/applications.json index e4b72fdff9..84b7fa33cf 100644 --- a/server_addon/applications/server/applications.json +++ b/server_addon/applications/server/applications.json @@ -1271,6 +1271,28 @@ } ] }, + "equalizer": { + "enabled": true, + "label": "3DEqualizer", + "icon": "{}/app_icons/3de4.png", + "host_name": "equalizer", + "environment": "{}", + "variants": [ + { + "name": "7-1v2", + "label": "7.1v2", + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\3DE4_win64_r7.1v2\\bin\\3DE4.exe" + ], + "darwin": [], + "linux": [] + }, + "environment": "{}" + } + ] + }, "additional_apps": [] } } diff --git a/server_addon/applications/server/settings.py b/server_addon/applications/server/settings.py index 5743e9f471..b77686cee0 100644 --- a/server_addon/applications/server/settings.py +++ b/server_addon/applications/server/settings.py @@ -190,6 +190,8 @@ class ApplicationsSettings(BaseSettingsModel): default_factory=AppGroupWithPython, title="OpenRV") zbrush: AppGroup = SettingsField( default_factory=AppGroupWithPython, title="Zbrush") + equalizer: AppGroup = SettingsField( + default_factory=AppGroupWithPython, title="3DEqualizer") additional_apps: list[AdditionalAppGroup] = SettingsField( default_factory=list, title="Additional Applications") diff --git a/server_addon/blender/package.py b/server_addon/blender/package.py new file mode 100644 index 0000000000..667076e533 --- /dev/null +++ b/server_addon/blender/package.py @@ -0,0 +1,3 @@ +name = "blender" +title = "Blender" +version = "0.1.8" diff --git a/server_addon/blender/server/__init__.py b/server_addon/blender/server/__init__.py index a7d6cb4400..b274e3bc29 100644 --- a/server_addon/blender/server/__init__.py +++ b/server_addon/blender/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from 
ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import BlenderSettings, DEFAULT_VALUES class BlenderAddon(BaseServerAddon): - name = "blender" - title = "Blender" - version = __version__ settings_model: Type[BlenderSettings] = BlenderSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/blender/server/version.py b/server_addon/blender/server/version.py deleted file mode 100644 index 9cb17e7976..0000000000 --- a/server_addon/blender/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.8" diff --git a/server_addon/celaction/package.py b/server_addon/celaction/package.py new file mode 100644 index 0000000000..2b11a8630f --- /dev/null +++ b/server_addon/celaction/package.py @@ -0,0 +1,3 @@ +name = "celaction" +title = "CelAction" +version = "0.1.0" diff --git a/server_addon/celaction/server/__init__.py b/server_addon/celaction/server/__init__.py index 90d3dbaa01..e3769a4b7f 100644 --- a/server_addon/celaction/server/__init__.py +++ b/server_addon/celaction/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import CelActionSettings, DEFAULT_VALUES class CelActionAddon(BaseServerAddon): - name = "celaction" - title = "CelAction" - version = __version__ settings_model: Type[CelActionSettings] = CelActionSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/celaction/server/version.py b/server_addon/celaction/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/celaction/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/clockify/package.py b/server_addon/clockify/package.py new file mode 100644 index 0000000000..bcf9425b3f --- /dev/null +++ b/server_addon/clockify/package.py @@ -0,0 +1,3 @@ +name = "clockify" +title = "Clockify" +version = "0.1.1" diff --git a/server_addon/clockify/server/__init__.py b/server_addon/clockify/server/__init__.py index 0fa453fdf4..11bbfed261 100644 --- a/server_addon/clockify/server/__init__.py +++ b/server_addon/clockify/server/__init__.py @@ -2,14 +2,8 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import ClockifySettings class ClockifyAddon(BaseServerAddon): - name = "clockify" - title = "Clockify" - version = __version__ settings_model: Type[ClockifySettings] = ClockifySettings - frontend_scopes = {} - services = {} diff --git a/server_addon/clockify/server/version.py b/server_addon/clockify/server/version.py deleted file mode 100644 index 485f44ac21..0000000000 --- a/server_addon/clockify/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.1" diff --git a/server_addon/create_ayon_addons.py b/server_addon/create_ayon_addons.py index bfd601af07..f0a36d4740 100644 --- a/server_addon/create_ayon_addons.py +++ b/server_addon/create_ayon_addons.py @@ -5,7 +5,7 @@ import shutil import argparse import zipfile import types -import importlib +import importlib.machinery import platform import collections from pathlib import Path @@ -245,12 +245,8 @@ def create_addon_package( keep_source: bool, ): src_package_py = addon_dir / "package.py" - package = None - if src_package_py.exists(): - package = import_filepath(src_package_py) - 
addon_version = package.version - else: - addon_version = get_addon_version(addon_dir) + package = import_filepath(src_package_py) + addon_version = package.version addon_output_dir = output_dir / addon_dir.name / addon_version if addon_output_dir.exists(): @@ -259,18 +255,7 @@ def create_addon_package( # Copy server content dst_package_py = addon_output_dir / "package.py" - if package is not None: - shutil.copy(src_package_py, dst_package_py) - else: - addon_name = addon_dir.name - if addon_name == "royal_render": - addon_name = "royalrender" - package_py_content = PACKAGE_PY_TEMPLATE.format( - addon_name=addon_name, addon_version=addon_version - ) - - with open(dst_package_py, "w+") as pkg_py: - pkg_py.write(package_py_content) + shutil.copy(src_package_py, dst_package_py) server_dir = addon_dir / "server" shutil.copytree( diff --git a/server_addon/deadline/package.py b/server_addon/deadline/package.py new file mode 100644 index 0000000000..25ba1c1166 --- /dev/null +++ b/server_addon/deadline/package.py @@ -0,0 +1,3 @@ +name = "deadline" +title = "Deadline" +version = "0.1.11" diff --git a/server_addon/deadline/server/__init__.py b/server_addon/deadline/server/__init__.py index 36d04189a9..8d2dc152cd 100644 --- a/server_addon/deadline/server/__init__.py +++ b/server_addon/deadline/server/__init__.py @@ -2,15 +2,13 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ -from .settings import DeadlineSettings, DEFAULT_VALUES +from .settings import DeadlineSettings, DEFAULT_VALUES, DeadlineSiteSettings class Deadline(BaseServerAddon): - name = "deadline" - title = "Deadline" - version = __version__ settings_model: Type[DeadlineSettings] = DeadlineSettings + site_settings_model: Type[DeadlineSiteSettings] = DeadlineSiteSettings + async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/deadline/server/settings/__init__.py b/server_addon/deadline/server/settings/__init__.py index 0307862afa..d25c0fb330 100644 --- a/server_addon/deadline/server/settings/__init__.py +++ b/server_addon/deadline/server/settings/__init__.py @@ -2,9 +2,11 @@ from .main import ( DeadlineSettings, DEFAULT_VALUES, ) +from .site_settings import DeadlineSiteSettings __all__ = ( "DeadlineSettings", + "DeadlineSiteSettings", "DEFAULT_VALUES", ) diff --git a/server_addon/deadline/server/settings/main.py b/server_addon/deadline/server/settings/main.py index 21a314cd2f..5d42b9b1ef 100644 --- a/server_addon/deadline/server/settings/main.py +++ b/server_addon/deadline/server/settings/main.py @@ -15,12 +15,6 @@ from .publish_plugins import ( ) -class ServerListSubmodel(BaseSettingsModel): - _layout = "compact" - name: str = SettingsField(title="Name") - value: str = SettingsField(title="Value") - - async def defined_deadline_ws_name_enum_resolver( addon: "BaseServerAddon", settings_variant: str = "production", @@ -32,25 +26,40 @@ async def defined_deadline_ws_name_enum_resolver( settings = await addon.get_studio_settings(variant=settings_variant) - ws_urls = [] + ws_server_name = [] for deadline_url_item in settings.deadline_urls: - ws_urls.append(deadline_url_item.name) + ws_server_name.append(deadline_url_item.name) - return ws_urls + return ws_server_name + +class ServerItemSubmodel(BaseSettingsModel): + """Connection info about configured DL servers.""" + _layout = "compact" + name: str = SettingsField(title="Name") + value: str = SettingsField(title="Url") + require_authentication: bool = SettingsField( + False, + 
title="Require authentication") + ssl: bool = SettingsField(False, + title="SSL") class DeadlineSettings(BaseSettingsModel): - deadline_urls: list[ServerListSubmodel] = SettingsField( + # configured DL servers + deadline_urls: list[ServerItemSubmodel] = SettingsField( default_factory=list, - title="System Deadline Webservice URLs", + title="System Deadline Webservice Info", scope=["studio"], ) + + # name(key) of selected server for project deadline_server: str = SettingsField( - title="Project deadline server", + title="Project Deadline server name", section="---", scope=["project"], enum_resolver=defined_deadline_ws_name_enum_resolver ) + publish: PublishPluginsModel = SettingsField( default_factory=PublishPluginsModel, title="Publish Plugins", @@ -62,11 +71,14 @@ class DeadlineSettings(BaseSettingsModel): return value + DEFAULT_VALUES = { "deadline_urls": [ { "name": "default", - "value": "http://127.0.0.1:8082" + "value": "http://127.0.0.1:8082", + "require_authentication": False, + "ssl": False } ], "deadline_server": "default", diff --git a/server_addon/deadline/server/settings/publish_plugins.py b/server_addon/deadline/server/settings/publish_plugins.py index 9f69143e37..784ad2560b 100644 --- a/server_addon/deadline/server/settings/publish_plugins.py +++ b/server_addon/deadline/server/settings/publish_plugins.py @@ -191,7 +191,6 @@ class NukeSubmitDeadlineModel(BaseSettingsModel): @validator( "limit_groups", - "env_allowed_keys", "env_search_replace_values") def validate_unique_names(cls, value): ensure_unique_names(value) diff --git a/server_addon/deadline/server/settings/site_settings.py b/server_addon/deadline/server/settings/site_settings.py new file mode 100644 index 0000000000..a77a6edc7e --- /dev/null +++ b/server_addon/deadline/server/settings/site_settings.py @@ -0,0 +1,26 @@ +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, +) +from .main import defined_deadline_ws_name_enum_resolver + + +class CredentialPerServerModel(BaseSettingsModel): + """Provide credentials for configured DL servers""" + _layout = "expanded" + server_name: str = SettingsField("", + title="DL server name", + enum_resolver=defined_deadline_ws_name_enum_resolver) + username: str = SettingsField("", + title="Username") + password: str = SettingsField("", + title="Password") + + +class DeadlineSiteSettings(BaseSettingsModel): + local_settings: list[CredentialPerServerModel] = SettingsField( + default_factory=list, + title="Local setting", + description="Please provide credentials for configured Deadline servers", + ) + diff --git a/server_addon/flame/package.py b/server_addon/flame/package.py new file mode 100644 index 0000000000..8c077ed91d --- /dev/null +++ b/server_addon/flame/package.py @@ -0,0 +1,3 @@ +name = "flame" +title = "Flame" +version = "0.1.0" diff --git a/server_addon/flame/server/__init__.py b/server_addon/flame/server/__init__.py index 7d5eb3960f..4aa46617ee 100644 --- a/server_addon/flame/server/__init__.py +++ b/server_addon/flame/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import FlameSettings, DEFAULT_VALUES class FlameAddon(BaseServerAddon): - name = "flame" - title = "Flame" - version = __version__ settings_model: Type[FlameSettings] = FlameSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/flame/server/version.py 
b/server_addon/flame/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/flame/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/fusion/package.py b/server_addon/fusion/package.py new file mode 100644 index 0000000000..9e7a46df2c --- /dev/null +++ b/server_addon/fusion/package.py @@ -0,0 +1,3 @@ +name = "fusion" +title = "Fusion" +version = "0.1.5" diff --git a/server_addon/fusion/server/__init__.py b/server_addon/fusion/server/__init__.py index 4d43f28812..0456cfd5ee 100644 --- a/server_addon/fusion/server/__init__.py +++ b/server_addon/fusion/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import FusionSettings, DEFAULT_VALUES class FusionAddon(BaseServerAddon): - name = "fusion" - title = "Fusion" - version = __version__ settings_model: Type[FusionSettings] = FusionSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/fusion/server/version.py b/server_addon/fusion/server/version.py deleted file mode 100644 index 1276d0254f..0000000000 --- a/server_addon/fusion/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.5" diff --git a/server_addon/harmony/package.py b/server_addon/harmony/package.py new file mode 100644 index 0000000000..83e88e7d57 --- /dev/null +++ b/server_addon/harmony/package.py @@ -0,0 +1,3 @@ +name = "harmony" +title = "Harmony" +version = "0.1.2" diff --git a/server_addon/harmony/server/__init__.py b/server_addon/harmony/server/__init__.py index 4ecda1989e..154618241e 100644 --- a/server_addon/harmony/server/__init__.py +++ b/server_addon/harmony/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import HarmonySettings, DEFAULT_HARMONY_SETTING -from .version import __version__ class Harmony(BaseServerAddon): - name = "harmony" - title = "Harmony" - version = __version__ - settings_model = HarmonySettings async def get_default_settings(self): diff --git a/server_addon/harmony/server/version.py b/server_addon/harmony/server/version.py deleted file mode 100644 index df0c92f1e2..0000000000 --- a/server_addon/harmony/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.2" diff --git a/server_addon/hiero/package.py b/server_addon/hiero/package.py new file mode 100644 index 0000000000..54c2f74fa7 --- /dev/null +++ b/server_addon/hiero/package.py @@ -0,0 +1,3 @@ +name = "hiero" +title = "Hiero" +version = "0.1.3" diff --git a/server_addon/hiero/server/__init__.py b/server_addon/hiero/server/__init__.py index d0f9bcefc3..3db78eafd7 100644 --- a/server_addon/hiero/server/__init__.py +++ b/server_addon/hiero/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import HieroSettings, DEFAULT_VALUES class HieroAddon(BaseServerAddon): - name = "hiero" - title = "Hiero" - version = __version__ settings_model: Type[HieroSettings] = HieroSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/hiero/server/settings/imageio.py b/server_addon/hiero/server/settings/imageio.py index f2bc71ac33..9e15e15597 100644 --- 
a/server_addon/hiero/server/settings/imageio.py +++ b/server_addon/hiero/server/settings/imageio.py @@ -149,15 +149,15 @@ class ImageIOSettings(BaseSettingsModel): DEFAULT_IMAGEIO_SETTINGS = { "workfile": { - "ocioConfigName": "nuke-default", - "workingSpace": "linear", - "viewerLut": "sRGB", - "eightBitLut": "sRGB", - "sixteenBitLut": "sRGB", - "logLut": "Cineon", - "floatLut": "linear", - "thumbnailLut": "sRGB", - "monitorOutLut": "sRGB" + "ocioConfigName": "aces_1.2", + "workingSpace": "role_scene_linear", + "viewerLut": "ACES/sRGB", + "eightBitLut": "role_matte_paint", + "sixteenBitLut": "role_texture_paint", + "logLut": "role_compositing_log", + "floatLut": "role_scene_linear", + "thumbnailLut": "ACES/sRGB", + "monitorOutLut": "ACES/sRGB" }, "regexInputs": { "inputs": [ diff --git a/server_addon/hiero/server/version.py b/server_addon/hiero/server/version.py deleted file mode 100644 index b3f4756216..0000000000 --- a/server_addon/hiero/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.2" diff --git a/server_addon/houdini/package.py b/server_addon/houdini/package.py new file mode 100644 index 0000000000..4e441c76ae --- /dev/null +++ b/server_addon/houdini/package.py @@ -0,0 +1,3 @@ +name = "houdini" +title = "Houdini" +version = "0.2.13" diff --git a/server_addon/houdini/server/__init__.py b/server_addon/houdini/server/__init__.py index 870ec2d0b7..8c1ffcb0b3 100644 --- a/server_addon/houdini/server/__init__.py +++ b/server_addon/houdini/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import HoudiniSettings, DEFAULT_VALUES class Houdini(BaseServerAddon): - name = "houdini" - title = "Houdini" - version = __version__ settings_model: Type[HoudiniSettings] = HoudiniSettings async def get_default_settings(self): diff --git a/server_addon/houdini/server/settings/imageio.py b/server_addon/houdini/server/settings/imageio.py index f4850c5df7..c4f4813d51 100644 --- a/server_addon/houdini/server/settings/imageio.py +++ b/server_addon/houdini/server/settings/imageio.py @@ -34,6 +34,34 @@ class ImageIOFileRulesModel(BaseSettingsModel): return value +class WorkfileImageIOModel(BaseSettingsModel): + """Workfile settings help. + + Empty values will be skipped, allowing any existing env vars to + pass through as defined. + + Note: The render space in Houdini is + always set to the 'scene_linear' role.""" + + enabled: bool = SettingsField(False, title="Enabled") + default_display: str = SettingsField( + title="Default active displays", + description="It behaves like the 'OCIO_ACTIVE_DISPLAYS' env var:" + " a colon-separated list of displays, e.g. ACES:P3" + ) + default_view: str = SettingsField( + title="Default active views", + description="It behaves like the 'OCIO_ACTIVE_VIEWS' env var:" + " a colon-separated list of views, e.g. sRGB:DCDM" + ) + review_color_space: str = SettingsField( + title="Review colorspace", + description="It exposes the OCIO Colorspace parameter in OpenGL nodes. " + "If left empty, Ayon will figure out the default " + "colorspace using your default display and default view."
+ ) + + class HoudiniImageIOModel(BaseSettingsModel): activate_host_color_management: bool = SettingsField( True, title="Enable Color Management" @@ -46,3 +74,26 @@ class HoudiniImageIOModel(BaseSettingsModel): default_factory=ImageIOFileRulesModel, title="File Rules" ) + workfile: WorkfileImageIOModel = SettingsField( + default_factory=WorkfileImageIOModel, + title="Workfile" + ) + + +DEFAULT_IMAGEIO_SETTINGS = { + "activate_host_color_management": False, + "ocio_config": { + "override_global_config": False, + "filepath": [] + }, + "file_rules": { + "activate_host_rules": False, + "rules": [] + }, + "workfile": { + "enabled": False, + "default_display": "ACES", + "default_view": "sRGB", + "review_color_space": "" + } +} diff --git a/server_addon/houdini/server/settings/main.py b/server_addon/houdini/server/settings/main.py index cbb19d15b7..3acab0ce74 100644 --- a/server_addon/houdini/server/settings/main.py +++ b/server_addon/houdini/server/settings/main.py @@ -3,7 +3,10 @@ from .general import ( GeneralSettingsModel, DEFAULT_GENERAL_SETTINGS ) -from .imageio import HoudiniImageIOModel +from .imageio import ( + HoudiniImageIOModel, + DEFAULT_IMAGEIO_SETTINGS +) from .shelves import ShelvesModel from .create import ( CreatePluginsModel, @@ -40,6 +43,7 @@ class HoudiniSettings(BaseSettingsModel): DEFAULT_VALUES = { "general": DEFAULT_GENERAL_SETTINGS, + "imageio": DEFAULT_IMAGEIO_SETTINGS, "shelves": [], "create": DEFAULT_HOUDINI_CREATE_SETTINGS, "publish": DEFAULT_HOUDINI_PUBLISH_SETTINGS diff --git a/server_addon/houdini/server/version.py b/server_addon/houdini/server/version.py deleted file mode 100644 index b5c9b6cb71..0000000000 --- a/server_addon/houdini/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.2.12" diff --git a/server_addon/max/package.py b/server_addon/max/package.py new file mode 100644 index 0000000000..fb1f1b3050 --- /dev/null +++ b/server_addon/max/package.py @@ -0,0 +1,3 @@ +name = "max" +title = "Max" +version = "0.1.7" diff --git a/server_addon/max/server/__init__.py b/server_addon/max/server/__init__.py index 31c694a084..d03b29d249 100644 --- a/server_addon/max/server/__init__.py +++ b/server_addon/max/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import MaxSettings, DEFAULT_VALUES class MaxAddon(BaseServerAddon): - name = "max" - title = "Max" - version = __version__ settings_model: Type[MaxSettings] = MaxSettings async def get_default_settings(self): diff --git a/server_addon/max/server/version.py b/server_addon/max/server/version.py deleted file mode 100644 index f1380eede2..0000000000 --- a/server_addon/max/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.7" diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py new file mode 100644 index 0000000000..fe3e3039f5 --- /dev/null +++ b/server_addon/maya/package.py @@ -0,0 +1,3 @@ +name = "maya" +title = "Maya" +version = "0.1.18" diff --git a/server_addon/maya/server/__init__.py b/server_addon/maya/server/__init__.py index 8784427dcf..6dda2cdd77 100644 --- a/server_addon/maya/server/__init__.py +++ b/server_addon/maya/server/__init__.py @@ -2,13 +2,9 @@ from ayon_server.addons import BaseServerAddon from .settings.main import MayaSettings, DEFAULT_MAYA_SETTING -from .version import __version__ class MayaAddon(BaseServerAddon): - name = "maya" - title = "Maya" - version = __version__ settings_model = MayaSettings async def get_default_settings(self): diff 
--git a/server_addon/maya/server/settings/publishers.py b/server_addon/maya/server/settings/publishers.py index 27288053a2..20523b2ca9 100644 --- a/server_addon/maya/server/settings/publishers.py +++ b/server_addon/maya/server/settings/publishers.py @@ -35,6 +35,50 @@ def angular_unit_enum(): ] +def extract_alembic_data_format_enum(): + return [ + {"label": "ogawa", "value": "ogawa"}, + {"label": "HDF", "value": "HDF"} + ] + + +def extract_alembic_overrides_enum(): + return [ + {"label": "Custom Attributes", "value": "attr"}, + {"label": "Custom Attributes Prefix", "value": "attrPrefix"}, + {"label": "Data Format", "value": "dataFormat"}, + {"label": "Euler Filter", "value": "eulerFilter"}, + {"label": "Mel Per Frame Callback", "value": "melPerFrameCallback"}, + {"label": "Mel Post Job Callback", "value": "melPostJobCallback"}, + {"label": "Pre Roll", "value": "preRoll"}, + {"label": "Pre Roll Start Frame", "value": "preRollStartFrame"}, + { + "label": "Python Per Frame Callback", + "value": "pythonPerFrameCallback" + }, + { + "label": "Python Post Job Callback", + "value": "pythonPostJobCallback" + }, + {"label": "Renderable Only", "value": "renderableOnly"}, + {"label": "Strip Namespaces", "value": "stripNamespaces"}, + {"label": "User Attr", "value": "userAttr"}, + {"label": "User Attr Prefix", "value": "userAttrPrefix"}, + {"label": "UV Write", "value": "uvWrite"}, + {"label": "UVs Only", "value": "uvsOnly"}, + {"label": "Verbose", "value": "verbose"}, + {"label": "Visible Only", "value": "visibleOnly"}, + {"label": "Whole Frame Geo", "value": "wholeFrameGeo"}, + {"label": "World Space", "value": "worldSpace"}, + {"label": "Write Color Sets", "value": "writeColorSets"}, + {"label": "Write Creases", "value": "writeCreases"}, + {"label": "Write Face Sets", "value": "writeFaceSets"}, + {"label": "Write Normals", "value": "writeNormals"}, + {"label": "Write UV Sets", "value": "writeUVSets"}, + {"label": "Write Visibility", "value": "writeVisibility"} + ] + + class BasicValidateModel(BaseSettingsModel): enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") @@ -184,7 +228,7 @@ class ValidateAttributesModel(BaseSettingsModel): if not success: raise BadRequestException( - "The attibutes can't be parsed as json object" + "The attributes can't be parsed as json object" ) return value @@ -220,7 +264,7 @@ class ValidateUnrealStaticMeshNameModel(BaseSettingsModel): enabled: bool = SettingsField(title="ValidateUnrealStaticMeshName") optional: bool = SettingsField(title="Optional") validate_mesh: bool = SettingsField(title="Validate mesh names") - validate_collision: bool = SettingsField(title="Validate collison names") + validate_collision: bool = SettingsField(title="Validate collision names") class ValidateCycleErrorModel(BaseSettingsModel): @@ -243,7 +287,7 @@ class ValidatePluginPathAttributesModel(BaseSettingsModel): and the node attribute is abc_file """ - enabled: bool = True + enabled: bool = SettingsField(title="Enabled") optional: bool = SettingsField(title="Optional") active: bool = SettingsField(title="Active") attribute: list[ValidatePluginPathAttributesAttrModel] = SettingsField( @@ -265,6 +309,9 @@ class RendererAttributesModel(BaseSettingsModel): class ValidateRenderSettingsModel(BaseSettingsModel): + enabled: bool = SettingsField(title="Enabled") + optional: bool = SettingsField(title="Optional") + active: bool = SettingsField(title="Active") arnold_render_attributes: list[RendererAttributesModel] = SettingsField( default_factory=list, 
title="Arnold Render Attributes") vray_render_attributes: list[RendererAttributesModel] = SettingsField( @@ -299,6 +346,108 @@ class ExtractAlembicModel(BaseSettingsModel): families: list[str] = SettingsField( default_factory=list, title="Families") + eulerFilter: bool = SettingsField( + title="Euler Filter", + description="Apply Euler filter while sampling rotations." + ) + renderableOnly: bool = SettingsField( + title="Renderable Only", + description="Only export renderable visible shapes." + ) + stripNamespaces: bool = SettingsField( + title="Strip Namespaces", + description=( + "Namespaces will be stripped off of the node before being written " + "to Alembic." + ) + ) + uvsOnly: bool = SettingsField( + title="UVs Only", + description=( + "If this flag is present, only uv data for PolyMesh and SubD " + "shapes will be written to the Alembic file." + ) + ) + uvWrite: bool = SettingsField( + title="UV Write", + description=( + "Uv data for PolyMesh and SubD shapes will be written to the " + "Alembic file." + ) + ) + verbose: bool = SettingsField( + title="Verbose", + description="Prints the current frame that is being evaluated." + ) + visibleOnly: bool = SettingsField( + title="Visible Only", + description="Only export dag objects visible during frame range." + ) + wholeFrameGeo: bool = SettingsField( + title="Whole Frame Geo", + description=( + "Data for geometry will only be written out on whole frames." + ) + ) + worldSpace: bool = SettingsField( + title="World Space", + description="Any root nodes will be stored in world space." + ) + writeColorSets: bool = SettingsField( + title="Write Color Sets", + description="Write vertex colors with the geometry." + ) + writeCreases: bool = SettingsField( + title="Write Creases", + description="Write the geometry's edge and vertex crease information." + ) + writeFaceSets: bool = SettingsField( + title="Write Face Sets", + description="Write face sets with the geometry." + ) + writeNormals: bool = SettingsField( + title="Write Normals", + description="Write normals with the deforming geometry." + ) + writeUVSets: bool = SettingsField( + title="Write UV Sets", + description=( + "Write all uv sets on MFnMeshes as vector 2 indexed geometry " + "parameters with face varying scope." + ) + ) + writeVisibility: bool = SettingsField( + title="Write Visibility", + description=( + "Visibility state will be stored in the Alembic file. Otherwise " + "everything written out is treated as visible." + ) + ) + preRoll: bool = SettingsField( + title="Pre Roll", + description=( + "When enabled, the pre roll start frame is used to pre roll the " + "When enabled, the pre roll start frame is used to being the " + "evaluation of the mesh. From the pre roll start frame to the " + "alembic start frame, will not be written to disk. This can be " + "used for simulation run up." + ) + ) + preRollStartFrame: int = SettingsField( + title="Pre Roll Start Frame", + description=( + "The frame to start scene evaluation at. This is used to set the " + "starting frame for time dependent translations and can be used to" + " evaluate run-up that isn't actually translated.\n" + "NOTE: Pre Roll needs to be enabled for this start frame " + "to be considered." + ) + ) + dataFormat: str = SettingsField( + enum_resolver=extract_alembic_data_format_enum, + title="Data Format", + description="The data format to use to write the file." 
+ ) bake_attributes: list[str] = SettingsField( default_factory=list, title="Bake Attributes", description="List of attributes that will be included in the alembic " @@ -309,6 +458,73 @@ class ExtractAlembicModel(BaseSettingsModel): description="List of attribute prefixes for attributes that will be " "included in the alembic export.", ) + attr: str = SettingsField( + title="Custom Attributes", + placeholder="attr1;attr2", + description=( + "Attributes matching by name will be included in the Alembic " + "export. Attributes should be separated by semi-colon `;`" + ) + ) + attrPrefix: str = SettingsField( + title="Custom Attributes Prefix", + placeholder="prefix1;prefix2", + description=( + "Attributes starting with these prefixes will be included in the " + "Alembic export. Attributes should be separated by semi-colon `;`" + ) + ) + userAttr: str = SettingsField( + title="User Attr", + placeholder="attr1;attr2", + description=( + "Attributes matching by name will be included in the Alembic " + "export. Attributes should be separated by semi-colon `;`" + ) + ) + userAttrPrefix: str = SettingsField( + title="User Attr Prefix", + placeholder="prefix1;prefix2", + description=( + "Attributes starting with these prefixes will be included in the " + "Alembic export. Attributes should be separated by semi-colon `;`" + ) + ) + melPerFrameCallback: str = SettingsField( + title="Mel Per Frame Callback", + description=( + "When each frame (and the static frame) is evaluated the string " + "specified is evaluated as a Mel command." + ) + ) + melPostJobCallback: str = SettingsField( + title="Mel Post Job Callback", + description=( + "When the translation has finished the string specified is " + "evaluated as a Mel command." + ) + ) + pythonPerFrameCallback: str = SettingsField( + title="Python Per Frame Callback", + description=( + "When each frame (and the static frame) is evaluated the string " + "specified is evaluated as a python command." + ) + ) + pythonPostJobCallback: str = SettingsField( + title="Python Post Job Callback", + description=( + "When the translation has finished the string specified is " + "evaluated as a python command." + ) + ) + overrides: list[str] = SettingsField( + enum_resolver=extract_alembic_overrides_enum, + title="Exposed Overrides", + description=( + "Expose the attributes in this list to the user when publishing."
+ ) class ExtractObjModel(BaseSettingsModel): @@ -392,7 +608,7 @@ class ExtractGPUCacheModel(BaseSettingsModel): title="Optimize Animations For Motion Blur" ) writeMaterials: bool = SettingsField(title="Write Materials") - useBaseTessellation: bool = SettingsField(title="User Base Tesselation") + useBaseTessellation: bool = SettingsField(title="Use Base Tessellation") class PublishersModel(BaseSettingsModel): @@ -668,15 +884,19 @@ class PublishersModel(BaseSettingsModel): default_factory=BasicValidateModel, title="Validate Alembic Visible Node", ) + ValidateAlembicDefaultsPointcache: BasicValidateModel = SettingsField( + default_factory=BasicValidateModel, + title="Validate Alembic Defaults Pointcache" + ) + ValidateAlembicDefaultsAnimation: BasicValidateModel = SettingsField( + default_factory=BasicValidateModel, + title="Validate Alembic Defaults Animation" + ) ExtractProxyAlembic: ExtractProxyAlembicModel = SettingsField( default_factory=ExtractProxyAlembicModel, title="Extract Proxy Alembic", section="Model Extractors", ) - ExtractAlembic: ExtractAlembicModel = SettingsField( - default_factory=ExtractAlembicModel, - title="Extract Alembic", - ) ExtractObj: ExtractObjModel = SettingsField( default_factory=ExtractObjModel, title="Extract OBJ" @@ -811,6 +1031,10 @@ class PublishersModel(BaseSettingsModel): default_factory=ExtractModelModel, title="Extract Model (Maya Scene)" ) + ExtractAlembic: ExtractAlembicModel = SettingsField( + default_factory=ExtractAlembicModel, + title="Extract Alembic" + ) DEFAULT_SUFFIX_NAMING = { @@ -942,6 +1166,9 @@ DEFAULT_PUBLISH_SETTINGS = { ] }, "ValidateRenderSettings": { + "enabled": True, + "active": True, + "optional": False, "arnold_render_attributes": [], "vray_render_attributes": [], "redshift_render_attributes": [], @@ -1200,16 +1427,6 @@ DEFAULT_PUBLISH_SETTINGS = { "proxyAbc" ] }, - "ExtractAlembic": { - "enabled": True, - "families": [ - "pointcache", - "model", - "vrayproxy.alembic" - ], - "bake_attributes": [], - "bake_attribute_prefixes": [] - }, "ExtractObj": { "enabled": False, "optional": True, @@ -1330,6 +1547,16 @@ DEFAULT_PUBLISH_SETTINGS = { "optional": False, "validate_shapes": True }, + "ValidateAlembicDefaultsPointcache": { + "enabled": True, + "optional": True, + "active": True + }, + "ValidateAlembicDefaultsAnimation": { + "enabled": True, + "optional": True, + "active": True + }, "ExtractPlayblast": DEFAULT_PLAYBLAST_SETTING, "ExtractMayaSceneRaw": { "enabled": True, @@ -1371,6 +1598,52 @@ DEFAULT_PUBLISH_SETTINGS = { "ExtractModel": { "enabled": True, "optional": True, - "active": True, + "active": True + }, + "ExtractAlembic": { + "enabled": True, + "families": [ + "pointcache", + "model", + "vrayproxy.alembic" + ], + "attr": "", + "attrPrefix": "", + "bake_attributes": [], + "bake_attribute_prefixes": [], + "dataFormat": "ogawa", + "eulerFilter": False, + "melPerFrameCallback": "", + "melPostJobCallback": "", + "overrides": [ + "attr", + "attrPrefix", + "renderableOnly", + "visibleOnly", + "worldSpace", + "writeColorSets", + "writeFaceSets", + "writeNormals" + ], + "preRoll": False, + "preRollStartFrame": 0, + "pythonPerFrameCallback": "", + "pythonPostJobCallback": "", + "renderableOnly": False, + "stripNamespaces": True, + "uvsOnly": False, + "uvWrite": True, + "userAttr": "", + "userAttrPrefix": "", + "verbose": False, + "visibleOnly": False, + "wholeFrameGeo": False, + "worldSpace": True, + "writeColorSets": False, + "writeCreases": False, + "writeFaceSets": False, + "writeNormals": True, + "writeUVSets": False, +
"writeVisibility": False } } diff --git a/server_addon/maya/server/version.py b/server_addon/maya/server/version.py deleted file mode 100644 index c1b7ff9d79..0000000000 --- a/server_addon/maya/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.16" diff --git a/server_addon/nuke/package.py b/server_addon/nuke/package.py new file mode 100644 index 0000000000..bf03c4e7e7 --- /dev/null +++ b/server_addon/nuke/package.py @@ -0,0 +1,3 @@ +name = "nuke" +title = "Nuke" +version = "0.1.11" diff --git a/server_addon/nuke/server/__init__.py b/server_addon/nuke/server/__init__.py index 032ceea5fb..aeb5e36675 100644 --- a/server_addon/nuke/server/__init__.py +++ b/server_addon/nuke/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import NukeSettings, DEFAULT_VALUES class NukeAddon(BaseServerAddon): - name = "nuke" - title = "Nuke" - version = __version__ settings_model: Type[NukeSettings] = NukeSettings async def get_default_settings(self): diff --git a/server_addon/nuke/server/settings/imageio.py b/server_addon/nuke/server/settings/imageio.py index 1b84457133..9cdb0bf1d7 100644 --- a/server_addon/nuke/server/settings/imageio.py +++ b/server_addon/nuke/server/settings/imageio.py @@ -97,8 +97,23 @@ class WorkfileColorspaceSettings(BaseSettingsModel): working_space: str = SettingsField( title="Working Space" ) - thumbnail_space: str = SettingsField( - title="Thumbnail Space" + monitor_lut: str = SettingsField( + title="Thumbnails" + ) + monitor_out_lut: str = SettingsField( + title="Monitor Out" + ) + int_8_lut: str = SettingsField( + title="8-bit Files" + ) + int_16_lut: str = SettingsField( + title="16-bit Files" + ) + log_lut: str = SettingsField( + title="Log Files" + ) + float_lut: str = SettingsField( + title="Float Files" ) @@ -120,6 +135,9 @@ class ViewProcessModel(BaseSettingsModel): viewerProcess: str = SettingsField( title="Viewer Process Name" ) + output_transform: str = SettingsField( + title="Output Transform" + ) class ImageIOConfigModel(BaseSettingsModel): @@ -214,16 +232,23 @@ class ImageIOSettings(BaseSettingsModel): DEFAULT_IMAGEIO_SETTINGS = { "viewer": { - "viewerProcess": "sRGB (default)" + "viewerProcess": "ACES/sRGB", + "output_transform": "ACES/sRGB" }, "baking": { - "viewerProcess": "rec709 (default)" + "viewerProcess": "ACES/Rec.709", + "output_transform": "ACES/Rec.709" }, "workfile": { "color_management": "OCIO", - "native_ocio_config": "nuke-default", - "working_space": "scene_linear", - "thumbnail_space": "sRGB (default)", + "native_ocio_config": "aces_1.2", + "working_space": "role_scene_linear", + "monitor_lut": "ACES/sRGB", + "monitor_out_lut": "ACES/sRGB", + "int_8_lut": "role_matte_paint", + "int_16_lut": "role_texture_paint", + "log_lut": "role_compositing_log", + "float_lut": "role_scene_linear" }, "nodes": { "required_nodes": [ diff --git a/server_addon/nuke/server/version.py b/server_addon/nuke/server/version.py deleted file mode 100644 index 569b1212f7..0000000000 --- a/server_addon/nuke/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.10" diff --git a/server_addon/photoshop/package.py b/server_addon/photoshop/package.py new file mode 100644 index 0000000000..25615529d1 --- /dev/null +++ b/server_addon/photoshop/package.py @@ -0,0 +1,3 @@ +name = "photoshop" +title = "Photoshop" +version = "0.1.2" diff --git a/server_addon/photoshop/server/__init__.py 
b/server_addon/photoshop/server/__init__.py index 3a45f7a809..86d1025a2d 100644 --- a/server_addon/photoshop/server/__init__.py +++ b/server_addon/photoshop/server/__init__.py @@ -1,14 +1,9 @@ from ayon_server.addons import BaseServerAddon from .settings import PhotoshopSettings, DEFAULT_PHOTOSHOP_SETTING -from .version import __version__ class Photoshop(BaseServerAddon): - name = "photoshop" - title = "Photoshop" - version = __version__ - settings_model = PhotoshopSettings async def get_default_settings(self): diff --git a/server_addon/photoshop/server/version.py b/server_addon/photoshop/server/version.py deleted file mode 100644 index df0c92f1e2..0000000000 --- a/server_addon/photoshop/server/version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -"""Package declaring addon version.""" -__version__ = "0.1.2" diff --git a/server_addon/resolve/package.py b/server_addon/resolve/package.py new file mode 100644 index 0000000000..cf92413bce --- /dev/null +++ b/server_addon/resolve/package.py @@ -0,0 +1,3 @@ +name = "resolve" +title = "DaVinci Resolve" +version = "0.1.0" diff --git a/server_addon/resolve/server/__init__.py b/server_addon/resolve/server/__init__.py index a84180d0f5..35d2db19e4 100644 --- a/server_addon/resolve/server/__init__.py +++ b/server_addon/resolve/server/__init__.py @@ -2,17 +2,11 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import ResolveSettings, DEFAULT_VALUES class ResolveAddon(BaseServerAddon): - name = "resolve" - title = "DaVinci Resolve" - version = __version__ settings_model: Type[ResolveSettings] = ResolveSettings - frontend_scopes = {} - services = {} async def get_default_settings(self): settings_model_cls = self.get_settings_model() diff --git a/server_addon/resolve/server/version.py b/server_addon/resolve/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/resolve/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/royal_render/server/version.py b/server_addon/royal_render/server/version.py deleted file mode 100644 index 485f44ac21..0000000000 --- a/server_addon/royal_render/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.1" diff --git a/server_addon/royalrender/package.py b/server_addon/royalrender/package.py new file mode 100644 index 0000000000..1fdea4abbb --- /dev/null +++ b/server_addon/royalrender/package.py @@ -0,0 +1,3 @@ +name = "royalrender" +title = "Royal Render" +version = "0.1.1" diff --git a/server_addon/royal_render/server/__init__.py b/server_addon/royalrender/server/__init__.py similarity index 77% rename from server_addon/royal_render/server/__init__.py rename to server_addon/royalrender/server/__init__.py index c5f0aafa00..5b10678136 100644 --- a/server_addon/royal_render/server/__init__.py +++ b/server_addon/royalrender/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import RoyalRenderSettings, DEFAULT_VALUES class RoyalRenderAddon(BaseServerAddon): - name = "royalrender" - version = __version__ - title = "Royal Render" settings_model: Type[RoyalRenderSettings] = RoyalRenderSettings async def get_default_settings(self): diff --git a/server_addon/royal_render/server/settings.py b/server_addon/royalrender/server/settings.py similarity index 100% rename from server_addon/royal_render/server/settings.py rename to 
server_addon/royalrender/server/settings.py diff --git a/server_addon/substancepainter/package.py b/server_addon/substancepainter/package.py new file mode 100644 index 0000000000..d445b0059f --- /dev/null +++ b/server_addon/substancepainter/package.py @@ -0,0 +1,3 @@ +name = "substancepainter" +title = "Substance Painter" +version = "0.1.1" diff --git a/server_addon/substancepainter/server/__init__.py b/server_addon/substancepainter/server/__init__.py index 2bf808d508..f6cd51e610 100644 --- a/server_addon/substancepainter/server/__init__.py +++ b/server_addon/substancepainter/server/__init__.py @@ -2,14 +2,10 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import SubstancePainterSettings, DEFAULT_SPAINTER_SETTINGS class SubstancePainterAddon(BaseServerAddon): - name = "substancepainter" - title = "Substance Painter" - version = __version__ settings_model: Type[SubstancePainterSettings] = SubstancePainterSettings async def get_default_settings(self): diff --git a/server_addon/substancepainter/server/settings/load_plugins.py b/server_addon/substancepainter/server/settings/load_plugins.py new file mode 100644 index 0000000000..e6b2fd86c3 --- /dev/null +++ b/server_addon/substancepainter/server/settings/load_plugins.py @@ -0,0 +1,122 @@ +from ayon_server.settings import BaseSettingsModel, SettingsField + + +def normal_map_format_enum(): + return [ + {"label": "DirectX", "value": "NormalMapFormat.DirectX"}, + {"label": "OpenGL", "value": "NormalMapFormat.OpenGL"}, + ] + + +def tangent_space_enum(): + return [ + {"label": "Per Fragment", "value": "TangentSpace.PerFragment"}, + {"label": "Per Vertex", "value": "TangentSpace.PerVertex"}, + ] + + +def uv_workflow_enum(): + return [ + {"label": "Default", "value": "ProjectWorkflow.Default"}, + {"label": "UV Tile", "value": "ProjectWorkflow.UVTile"}, + {"label": "Texture Set Per UV Tile", + "value": "ProjectWorkflow.TextureSetPerUVTile"} + ] + + +def document_resolution_enum(): + return [ + {"label": "128", "value": 128}, + {"label": "256", "value": 256}, + {"label": "512", "value": 512}, + {"label": "1024", "value": 1024}, + {"label": "2048", "value": 2048}, + {"label": "4096", "value": 4096} + ] + + +class ProjectTemplatesModel(BaseSettingsModel): + _layout = "expanded" + name: str = SettingsField("default", title="Template Name") + default_texture_resolution: int = SettingsField( + 1024, enum_resolver=document_resolution_enum, + title="Document Resolution", + description=("Set the texture resolution when " + "creating a new project.") + ) + import_cameras: bool = SettingsField( + True, title="Import Cameras", + description="Import cameras from the mesh file.") + normal_map_format: str = SettingsField( + "NormalMapFormat.DirectX", enum_resolver=normal_map_format_enum, + title="Normal Map Format", + description=("Set the normal map format when " + "creating a new project.") + ) + project_workflow: str = SettingsField( + "ProjectWorkflow.Default", enum_resolver=uv_workflow_enum, + title="UV Tile Settings", + description=("Set the UV workflow when " + "creating a new project.") + ) + tangent_space_mode: str = SettingsField( + "TangentSpace.PerFragment", enum_resolver=tangent_space_enum, + title="Tangent Space", + description=("An option to compute tangent space " + "when creating a new project.") + ) + preserve_strokes: bool = SettingsField( + True, title="Preserve Strokes", + description=("Preserve stroke positions on the mesh.\n" + "(only relevant when loading into an " + "existing project)") + ) + + +class
ProjectTemplateSettingModel(BaseSettingsModel): + project_templates: list[ProjectTemplatesModel] = SettingsField( + default_factory=list, + title="Project Templates" + ) + + +class LoadersModel(BaseSettingsModel): + SubstanceLoadProjectMesh: ProjectTemplateSettingModel = SettingsField( + default_factory=ProjectTemplateSettingModel, + title="Load Mesh" + ) + + +DEFAULT_LOADER_SETTINGS = { + "SubstanceLoadProjectMesh": { + "project_templates": [ + { + "name": "2K(Default)", + "default_texture_resolution": 2048, + "import_cameras": True, + "normal_map_format": "NormalMapFormat.DirectX", + "project_workflow": "ProjectWorkflow.Default", + "tangent_space_mode": "TangentSpace.PerFragment", + "preserve_strokes": True + }, + { + "name": "2K(UV tile)", + "default_texture_resolution": 2048, + "import_cameras": True, + "normal_map_format": "NormalMapFormat.DirectX", + "project_workflow": "ProjectWorkflow.UVTile", + "tangent_space_mode": "TangentSpace.PerFragment", + "preserve_strokes": True + }, + { + "name": "4K(Custom)", + "default_texture_resolution": 4096, + "import_cameras": True, + "normal_map_format": "NormalMapFormat.OpenGL", + "project_workflow": "ProjectWorkflow.UVTile", + "tangent_space_mode": "TangentSpace.PerFragment", + "preserve_strokes": True + } + ] + } +} diff --git a/server_addon/substancepainter/server/settings/main.py b/server_addon/substancepainter/server/settings/main.py index f80fa9fe1e..93523fd650 100644 --- a/server_addon/substancepainter/server/settings/main.py +++ b/server_addon/substancepainter/server/settings/main.py @@ -1,5 +1,6 @@ from ayon_server.settings import BaseSettingsModel, SettingsField from .imageio import ImageIOSettings, DEFAULT_IMAGEIO_SETTINGS +from .load_plugins import LoadersModel, DEFAULT_LOADER_SETTINGS class ShelvesSettingsModel(BaseSettingsModel): @@ -17,9 +18,12 @@ class SubstancePainterSettings(BaseSettingsModel): default_factory=list, title="Shelves" ) + load: LoadersModel = SettingsField( + default_factory=LoadersModel, title="Loaders") DEFAULT_SPAINTER_SETTINGS = { "imageio": DEFAULT_IMAGEIO_SETTINGS, - "shelves": [] + "shelves": [], + "load": DEFAULT_LOADER_SETTINGS, } diff --git a/server_addon/substancepainter/server/version.py b/server_addon/substancepainter/server/version.py deleted file mode 100644 index 3dc1f76bc6..0000000000 --- a/server_addon/substancepainter/server/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.1.0" diff --git a/server_addon/timers_manager/package.py b/server_addon/timers_manager/package.py new file mode 100644 index 0000000000..bd6b81b4b7 --- /dev/null +++ b/server_addon/timers_manager/package.py @@ -0,0 +1,3 @@ +name = "timers_manager" +title = "Timers Manager" +version = "0.1.1" diff --git a/server_addon/timers_manager/server/__init__.py b/server_addon/timers_manager/server/__init__.py index 29f9d47370..32e83d295c 100644 --- a/server_addon/timers_manager/server/__init__.py +++ b/server_addon/timers_manager/server/__init__.py @@ -2,12 +2,8 @@ from typing import Type from ayon_server.addons import BaseServerAddon -from .version import __version__ from .settings import TimersManagerSettings class TimersManagerAddon(BaseServerAddon): - name = "timers_manager" - version = __version__ - title = "Timers Manager" settings_model: Type[TimersManagerSettings] = TimersManagerSettings diff --git a/server_addon/timers_manager/server/version.py b/server_addon/timers_manager/server/version.py deleted file mode 100644 index 485f44ac21..0000000000 ---
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.1"
diff --git a/server_addon/traypublisher/package.py b/server_addon/traypublisher/package.py
new file mode 100644
index 0000000000..4ca8ae9fd3
--- /dev/null
+++ b/server_addon/traypublisher/package.py
@@ -0,0 +1,3 @@
+name = "traypublisher"
+title = "TrayPublisher"
+version = "0.1.4"
diff --git a/server_addon/traypublisher/server/__init__.py b/server_addon/traypublisher/server/__init__.py
index e6f079609f..830f325ac0 100644
--- a/server_addon/traypublisher/server/__init__.py
+++ b/server_addon/traypublisher/server/__init__.py
@@ -1,14 +1,9 @@
 from ayon_server.addons import BaseServerAddon

-from .version import __version__
 from .settings import TraypublisherSettings, DEFAULT_TRAYPUBLISHER_SETTING


 class Traypublisher(BaseServerAddon):
-    name = "traypublisher"
-    title = "TrayPublisher"
-    version = __version__
-
     settings_model = TraypublisherSettings

     async def get_default_settings(self):
diff --git a/server_addon/traypublisher/server/settings/creator_plugins.py b/server_addon/traypublisher/server/settings/creator_plugins.py
index bf66d9a088..1ff14002aa 100644
--- a/server_addon/traypublisher/server/settings/creator_plugins.py
+++ b/server_addon/traypublisher/server/settings/creator_plugins.py
@@ -1,4 +1,7 @@
+from pydantic import validator
 from ayon_server.settings import BaseSettingsModel, SettingsField
+from ayon_server.settings.validators import ensure_unique_names
+from ayon_server.exceptions import BadRequestException


 class BatchMovieCreatorPlugin(BaseSettingsModel):
@@ -22,11 +25,139 @@ class BatchMovieCreatorPlugin(BaseSettingsModel):
     )


+class ColumnItemModel(BaseSettingsModel):
+    """Definition of a single CSV column.
+
+    Name, value type, default value and validation regex pattern."""
+
+    name: str = SettingsField(
+        title="Name",
+        default=""
+    )
+
+    type: str = SettingsField(
+        title="Type",
+        default=""
+    )
+
+    default: str = SettingsField(
+        title="Default",
+        default=""
+    )
+
+    required_column: bool = SettingsField(
+        title="Required Column",
+        default=False
+    )
+
+    validation_pattern: str = SettingsField(
+        title="Validation Regex Pattern",
+        default="^(.*)$"
+    )
+
+
+class ColumnConfigModel(BaseSettingsModel):
+    """Configuration of the columns expected in the ingested CSV file
+    and the delimiter that separates them.
+    """
+
+    csv_delimiter: str = SettingsField(
+        title="CSV delimiter",
+        default=","
+    )
+
+    columns: list[ColumnItemModel] = SettingsField(
+        title="Columns",
+        default_factory=list
+    )
+
+    @validator("columns")
+    def validate_unique_names(cls, value):
+        ensure_unique_names(value)
+        return value
+
+
+class RepresentationItemModel(BaseSettingsModel):
+    """Single representation definition.
+
+    Maps a representation name to the file extensions
+    that belong to it.
+    """
+
+    name: str = SettingsField(
+        title="Name",
+        default=""
+    )
+
+    extensions: list[str] = SettingsField(
+        title="Extensions",
+        default_factory=list
+    )
+
+    @validator("extensions")
+    def validate_extension(cls, value):
+        for ext in value:
+            if not ext.startswith("."):
+                raise BadRequestException(f"Extension must start with '.': {ext}")
+        return value
+
+
+class RepresentationConfigModel(BaseSettingsModel):
+    """Configuration of representations created from the CSV rows
+    and the tags attached to them.
+    """
+
+    tags_delimiter: str = SettingsField(
+        title="Tags delimiter",
+        default=";"
+    )
+
+    default_tags: list[str] = SettingsField(
+        title="Default tags",
+        default_factory=list
+    )
+
+    representations: list[RepresentationItemModel] = SettingsField(
+        title="Representations",
+        default_factory=list
+    )
+
+    @validator("representations")
+    def validate_unique_names(cls, value):
+        ensure_unique_names(value)
+        return value
+
+
+class IngestCSVPluginModel(BaseSettingsModel):
+    """Ingest products described by rows of a CSV file.
+
+    Allows to publish multiple products in one go."""
+
+    enabled: bool = SettingsField(
+        title="Enabled",
+        default=False
+    )
+
+    columns_config: ColumnConfigModel = SettingsField(
+        title="Columns config",
+        default_factory=ColumnConfigModel
+    )
+
+    representations_config: RepresentationConfigModel = SettingsField(
+        title="Representations config",
+        default_factory=RepresentationConfigModel
+    )
+
+
 class TrayPublisherCreatePluginsModel(BaseSettingsModel):
     BatchMovieCreator: BatchMovieCreatorPlugin = SettingsField(
         title="Batch Movie Creator",
         default_factory=BatchMovieCreatorPlugin
     )
+    IngestCSV: IngestCSVPluginModel = SettingsField(
+        title="Ingest CSV",
+        default_factory=IngestCSVPluginModel
+    )


 DEFAULT_CREATORS = {
@@ -41,4 +172,170 @@ DEFAULT_CREATORS = {
             ".mov"
         ]
     },
+    "IngestCSV": {
+        "enabled": True,
+        "columns_config": {
+            "csv_delimiter": ",",
+            "columns": [
+                {
+                    "name": "File Path",
+                    "type": "text",
+                    "default": "",
+                    "required_column": True,
+                    "validation_pattern": "^([a-z0-9#._\\/]*)$"
+                },
+                {
+                    "name": "Folder Path",
+                    "type": "text",
+                    "default": "",
+                    "required_column": True,
+                    "validation_pattern": "^([a-zA-Z0-9_\\/]*)$"
+                },
+                {
+                    "name": "Task Name",
+                    "type": "text",
+                    "default": "",
+                    "required_column": True,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Product Type",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Variant",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Version",
+                    "type": "number",
+                    "default": 1,
+                    "required_column": True,
+                    "validation_pattern": "^(\\d{1,3})$"
+                },
+                {
+                    "name": "Version Comment",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Version Thumbnail",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^([a-zA-Z0-9#._\\/]*)$"
+                },
+                {
+                    "name": "Frame Start",
+                    "type": "number",
+                    "default": 0,
+                    "required_column": True,
+                    "validation_pattern": "^(\\d{1,8})$"
+                },
+                {
+                    "name": "Frame End",
+                    "type": "number",
+                    "default": 0,
+                    "required_column": True,
+                    "validation_pattern": "^(\\d{1,8})$"
+                },
+                {
+                    "name": "Handle Start",
+                    "type": "number",
+                    "default": 0,
+                    "required_column": True,
+                    "validation_pattern": "^(\\d)$"
+                },
+                {
+                    "name": "Handle End",
+                    "type": "number",
+                    "default": 0,
+                    "required_column": True,
+                    "validation_pattern": "^(\\d)$"
+                },
+                {
+                    "name": "FPS",
+                    "type": "decimal",
+                    "default": 0.0,
+                    "required_column": True,
+                    "validation_pattern": "^[0-9]*\\.[0-9]+$|^[0-9]+$"
+                },
+                {
+                    "name": "Slate Exists",
+                    "type": "bool",
+                    "default": True,
+                    "required_column": False,
+                    "validation_pattern": "(True|False)"
+                },
+                {
+                    "name": "Representation",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Representation Colorspace",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                },
+                {
+                    "name": "Representation Tags",
+                    "type": "text",
+                    "default": "",
+                    "required_column": False,
+                    "validation_pattern": "^(.*)$"
+                }
+            ]
+        },
+        "representations_config": {
+            "tags_delimiter": ";",
+            "default_tags": [
+                "review"
+            ],
+            "representations": [
+                {
+                    "name": "preview",
+                    "extensions": [
+                        ".mp4",
+                        ".mov"
+                    ]
+                },
+                {
+                    "name": "exr",
+                    "extensions": [
+                        ".exr"
+                    ]
+                },
+                {
+                    "name": "edit",
+                    "extensions": [
+                        ".mov"
+                    ]
+                },
+                {
+                    "name": "review",
+                    "extensions": [
+                        ".mov"
+                    ]
+                },
+                {
+                    "name": "nuke",
+                    "extensions": [
+                        ".nk"
+                    ]
+                }
+            ]
+        }
+    }
 }
diff --git a/server_addon/traypublisher/server/version.py b/server_addon/traypublisher/server/version.py
deleted file mode 100644
index de699158fd..0000000000
--- a/server_addon/traypublisher/server/version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Package declaring addon version."""
-__version__ = "0.1.4"
diff --git a/server_addon/tvpaint/package.py b/server_addon/tvpaint/package.py
new file mode 100644
index 0000000000..2be3164f4a
--- /dev/null
+++ b/server_addon/tvpaint/package.py
@@ -0,0 +1,3 @@
+name = "tvpaint"
+title = "TVPaint"
+version = "0.1.2"
diff --git a/server_addon/tvpaint/server/__init__.py b/server_addon/tvpaint/server/__init__.py
index 033d7d3792..658dcf0bb6 100644
--- a/server_addon/tvpaint/server/__init__.py
+++ b/server_addon/tvpaint/server/__init__.py
@@ -2,14 +2,10 @@ from typing import Type

 from ayon_server.addons import BaseServerAddon

-from .version import __version__
 from .settings import TvpaintSettings, DEFAULT_VALUES


 class TvpaintAddon(BaseServerAddon):
-    name = "tvpaint"
-    title = "TVPaint"
-    version = __version__
     settings_model: Type[TvpaintSettings] = TvpaintSettings

     async def get_default_settings(self):
diff --git a/server_addon/tvpaint/server/version.py b/server_addon/tvpaint/server/version.py
deleted file mode 100644
index b3f4756216..0000000000
--- a/server_addon/tvpaint/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.2"
diff --git a/server_addon/unreal/package.py b/server_addon/unreal/package.py
new file mode 100644
index 0000000000..cab89ca873
--- /dev/null
+++ b/server_addon/unreal/package.py
@@ -0,0 +1,3 @@
+name = "unreal"
+title = "Unreal"
+version = "0.1.0"
diff --git a/server_addon/unreal/server/__init__.py b/server_addon/unreal/server/__init__.py
index a5f3e9597d..751560b623 100644
--- a/server_addon/unreal/server/__init__.py
+++ b/server_addon/unreal/server/__init__.py
@@ -2,17 +2,11 @@ from typing import Type

 from ayon_server.addons import BaseServerAddon

-from .version import __version__
 from .settings import UnrealSettings, DEFAULT_VALUES


 class UnrealAddon(BaseServerAddon):
-    name = "unreal"
-    title = "Unreal"
-    version = __version__
     settings_model: Type[UnrealSettings] = UnrealSettings

-    frontend_scopes = {}
-    services = {}

     async def get_default_settings(self):
diff --git a/server_addon/unreal/server/version.py b/server_addon/unreal/server/version.py
deleted file mode 100644
index 3dc1f76bc6..0000000000
--- a/server_addon/unreal/server/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"