diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 2339ec878f..203ac1df23 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -35,6 +35,8 @@ body: label: Version description: What version are you running? Look to OpenPype Tray options: + - 3.15.11-nightly.4 + - 3.15.11-nightly.3 - 3.15.11-nightly.2 - 3.15.11-nightly.1 - 3.15.10 @@ -133,8 +135,6 @@ body: - 3.14.3 - 3.14.3-nightly.7 - 3.14.3-nightly.6 - - 3.14.3-nightly.5 - - 3.14.3-nightly.4 validations: required: true - type: dropdown diff --git a/openpype/client/operations.py b/openpype/client/operations.py index ef48f2a1c4..e8c9d28636 100644 --- a/openpype/client/operations.py +++ b/openpype/client/operations.py @@ -220,7 +220,6 @@ def new_representation_doc( "parent": version_id, "name": name, "data": data, - # Imprint shortcut to context for performance reasons. "context": context } @@ -708,7 +707,11 @@ class OperationsSession(object): return operation -def create_project(project_name, project_code, library_project=False): +def create_project( + project_name, + project_code, + library_project=False, +): """Create project using OpenPype settings. 
This project creation function is not validating project document on @@ -752,7 +755,7 @@ def create_project(project_name, project_code, library_project=False): "name": project_name, "data": { "code": project_code, - "library_project": library_project + "library_project": library_project, }, "schema": CURRENT_PROJECT_SCHEMA } diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py index e2af0720ec..1d53802ecf 100644 --- a/openpype/hosts/max/api/lib.py +++ b/openpype/hosts/max/api/lib.py @@ -1,30 +1,27 @@ # -*- coding: utf-8 -*- """Library of functions useful for 3dsmax pipeline.""" -import json -import six -from pymxs import runtime as rt -from typing import Union import contextlib +import json +from typing import Any, Dict, Union +import six from openpype.pipeline.context_tools import ( - get_current_project_asset, - get_current_project -) - + get_current_project, get_current_project_asset,) +from pymxs import runtime as rt JSON_PREFIX = "JSON::" def imprint(node_name: str, data: dict) -> bool: - node = rt.getNodeByName(node_name) + node = rt.GetNodeByName(node_name) if not node: return False for k, v in data.items(): if isinstance(v, (dict, list)): - rt.setUserProp(node, k, f'{JSON_PREFIX}{json.dumps(v)}') + rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}") else: - rt.setUserProp(node, k, v) + rt.SetUserProp(node, k, v) return True @@ -44,7 +41,7 @@ def lsattr( Returns: list of nodes. 
""" - root = rt.rootnode if root is None else rt.getNodeByName(root) + root = rt.RootNode if root is None else rt.GetNodeByName(root) def output_node(node, nodes): nodes.append(node) @@ -55,16 +52,16 @@ def lsattr( output_node(root, nodes) return [ n for n in nodes - if rt.getUserProp(n, attr) == value + if rt.GetUserProp(n, attr) == value ] if value else [ n for n in nodes - if rt.getUserProp(n, attr) + if rt.GetUserProp(n, attr) ] def read(container) -> dict: data = {} - props = rt.getUserPropBuffer(container) + props = rt.GetUserPropBuffer(container) # this shouldn't happen but let's guard against it anyway if not props: return data @@ -79,29 +76,25 @@ def read(container) -> dict: value = value.strip() if isinstance(value.strip(), six.string_types) and \ value.startswith(JSON_PREFIX): - try: + with contextlib.suppress(json.JSONDecodeError): value = json.loads(value[len(JSON_PREFIX):]) - except json.JSONDecodeError: - # not a json - pass - data[key.strip()] = value - data["instance_node"] = container.name + data["instance_node"] = container.Name return data @contextlib.contextmanager def maintained_selection(): - previous_selection = rt.getCurrentSelection() + previous_selection = rt.GetCurrentSelection() try: yield finally: if previous_selection: - rt.select(previous_selection) + rt.Select(previous_selection) else: - rt.select() + rt.Select() def get_all_children(parent, node_type=None): @@ -123,7 +116,7 @@ def get_all_children(parent, node_type=None): return children child_list = list_children(parent) - return ([x for x in child_list if rt.superClassOf(x) == node_type] + return ([x for x in child_list if rt.SuperClassOf(x) == node_type] if node_type else child_list) @@ -182,7 +175,7 @@ def set_scene_resolution(width: int, height: int): """ # make sure the render dialog is closed # for the update of resolution - # Changing the Render Setup dialog settingsshould be done + # Changing the Render Setup dialog settings should be done # with the actual Render Setup 
dialog in a closed state. if rt.renderSceneDialog.isOpen(): rt.renderSceneDialog.close() @@ -190,6 +183,7 @@ def set_scene_resolution(width: int, height: int): rt.renderWidth = width rt.renderHeight = height + def reset_scene_resolution(): """Apply the scene resolution from the project definition @@ -212,7 +206,7 @@ def reset_scene_resolution(): set_scene_resolution(width, height) -def get_frame_range() -> dict: +def get_frame_range() -> Union[Dict[str, Any], None]: """Get the current assets frame range and handles. Returns: @@ -259,7 +253,7 @@ def reset_frame_range(fps: bool = True): frange_cmd = ( f"animationRange = interval {frame_start_handle} {frame_end_handle}" ) - rt.execute(frange_cmd) + rt.Execute(frange_cmd) set_render_frame_range(frame_start_handle, frame_end_handle) @@ -289,5 +283,5 @@ def get_max_version(): #(25000, 62, 0, 25, 0, 0, 997, 2023, "") max_info[7] = max version date """ - max_info = rt.maxversion() + max_info = rt.MaxVersion() return max_info[7] diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py index 94b0aeb913..3074f8e170 100644 --- a/openpype/hosts/max/api/lib_renderproducts.py +++ b/openpype/hosts/max/api/lib_renderproducts.py @@ -124,7 +124,7 @@ class RenderProducts(object): """Get all the Arnold AOVs name""" aov_name = [] - amw = rt.MaxtoAOps.AOVsManagerWindow() + amw = rt.MaxToAOps.AOVsManagerWindow() aov_mgr = rt.renderers.current.AOVManager # Check if there is any aov group set in AOV manager aov_group_num = len(aov_mgr.drivers) diff --git a/openpype/hosts/max/api/plugin.py b/openpype/hosts/max/api/plugin.py index b54568b360..4c1dbb2810 100644 --- a/openpype/hosts/max/api/plugin.py +++ b/openpype/hosts/max/api/plugin.py @@ -1,15 +1,105 @@ # -*- coding: utf-8 -*- """3dsmax specific Avalon/Pyblish plugin definitions.""" -from pymxs import runtime as rt -import six from abc import ABCMeta -from openpype.pipeline import ( - CreatorError, - Creator, - CreatedInstance -) + +import six 
+from pymxs import runtime as rt + from openpype.lib import BoolDef -from .lib import imprint, read, lsattr +from openpype.pipeline import CreatedInstance, Creator, CreatorError + +from .lib import imprint, lsattr, read + +MS_CUSTOM_ATTRIB = """attributes "openPypeData" +( + parameters main rollout:OPparams + ( + all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on + ) + + rollout OPparams "OP Parameters" + ( + listbox list_node "Node References" items:#() + button button_add "Add to Container" + button button_del "Delete from Container" + + fn node_to_name the_node = + ( + handle = the_node.handle + obj_name = the_node.name + handle_name = obj_name + "<" + handle as string + ">" + return handle_name + ) + + on button_add pressed do + ( + current_selection = selectByName title:"Select Objects to add to + the Container" buttontext:"Add" + if current_selection == undefined then return False + temp_arr = #() + i_node_arr = #() + for c in current_selection do + ( + handle_name = node_to_name c + node_ref = NodeTransformMonitor node:c + append temp_arr handle_name + append i_node_arr node_ref + ) + all_handles = join i_node_arr all_handles + list_node.items = join temp_arr list_node.items + ) + + on button_del pressed do + ( + current_selection = selectByName title:"Select Objects to remove + from the Container" buttontext:"Remove" + if current_selection == undefined then return False + temp_arr = #() + i_node_arr = #() + new_i_node_arr = #() + new_temp_arr = #() + + for c in current_selection do + ( + node_ref = NodeTransformMonitor node:c as string + handle_name = node_to_name c + tmp_all_handles = #() + for i in all_handles do + ( + tmp = i as string + append tmp_all_handles tmp + ) + idx = finditem tmp_all_handles node_ref + if idx do + ( + new_i_node_arr = DeleteItem all_handles idx + + ) + idx = finditem list_node.items handle_name + if idx do + ( + new_temp_arr = DeleteItem list_node.items idx + ) + ) + all_handles = join i_node_arr new_i_node_arr + 
list_node.items = join temp_arr new_temp_arr + ) + + on OPparams open do + ( + if all_handles.count != 0 then + ( + temp_arr = #() + for x in all_handles do + ( + handle_name = node_to_name x.node + append temp_arr handle_name + ) + list_node.items = temp_arr + ) + ) + ) +)""" class OpenPypeCreatorError(CreatorError): @@ -20,28 +110,40 @@ class MaxCreatorBase(object): @staticmethod def cache_subsets(shared_data): - if shared_data.get("max_cached_subsets") is None: - shared_data["max_cached_subsets"] = {} - cached_instances = lsattr("id", "pyblish.avalon.instance") - for i in cached_instances: - creator_id = rt.getUserProp(i, "creator_identifier") - if creator_id not in shared_data["max_cached_subsets"]: - shared_data["max_cached_subsets"][creator_id] = [i.name] - else: - shared_data[ - "max_cached_subsets"][creator_id].append(i.name) # noqa + if shared_data.get("max_cached_subsets") is not None: + return shared_data + + shared_data["max_cached_subsets"] = {} + cached_instances = lsattr("id", "pyblish.avalon.instance") + for i in cached_instances: + creator_id = rt.GetUserProp(i, "creator_identifier") + if creator_id not in shared_data["max_cached_subsets"]: + shared_data["max_cached_subsets"][creator_id] = [i.name] + else: + shared_data[ + "max_cached_subsets"][creator_id].append(i.name) return shared_data @staticmethod - def create_instance_node(node_name: str, parent: str = ""): - parent_node = rt.getNodeByName(parent) if parent else rt.rootScene - if not parent_node: - raise OpenPypeCreatorError(f"Specified parent {parent} not found") + def create_instance_node(node): + """Create instance node. - container = rt.container(name=node_name) - container.Parent = parent_node + If the supplied node is existing node, it will be used to hold the + instance, otherwise new node of type Dummy will be created. - return container + Args: + node (rt.MXSWrapperBase, str): Node or node name to use. 
+ + Returns: + instance + """ + if isinstance(node, str): + node = rt.Container(name=node) + + attrs = rt.Execute(MS_CUSTOM_ATTRIB) + rt.custAttributes.add(node.baseObject, attrs) + + return node @six.add_metaclass(ABCMeta) @@ -50,7 +152,7 @@ class MaxCreator(Creator, MaxCreatorBase): def create(self, subset_name, instance_data, pre_create_data): if pre_create_data.get("use_selection"): - self.selected_nodes = rt.getCurrentSelection() + self.selected_nodes = rt.GetCurrentSelection() instance_node = self.create_instance_node(subset_name) instance_data["instance_node"] = instance_node.name @@ -60,8 +162,16 @@ class MaxCreator(Creator, MaxCreatorBase): instance_data, self ) - for node in self.selected_nodes: - node.Parent = instance_node + if pre_create_data.get("use_selection"): + + node_list = [] + for i in self.selected_nodes: + node_ref = rt.NodeTransformMonitor(node=i) + node_list.append(node_ref) + + # Setting the property + rt.setProperty( + instance_node.openPypeData, "all_handles", node_list) self._add_instance_to_context(instance) imprint(instance_node.name, instance.data_to_store()) @@ -70,10 +180,9 @@ class MaxCreator(Creator, MaxCreatorBase): def collect_instances(self): self.cache_subsets(self.collection_shared_data) - for instance in self.collection_shared_data[ - "max_cached_subsets"].get(self.identifier, []): + for instance in self.collection_shared_data["max_cached_subsets"].get(self.identifier, []): # noqa created_instance = CreatedInstance.from_existing( - read(rt.getNodeByName(instance)), self + read(rt.GetNodeByName(instance)), self ) self._add_instance_to_context(created_instance) @@ -98,12 +207,10 @@ class MaxCreator(Creator, MaxCreatorBase): """ for instance in instances: - instance_node = rt.getNodeByName( - instance.data.get("instance_node")) - if instance_node: - rt.select(instance_node) - rt.execute(f'for o in selection do for c in o.children do c.parent = undefined') # noqa - rt.delete(instance_node) + if instance_node := 
rt.GetNodeByName(instance.data.get("instance_node")): # noqa + count = rt.custAttributes.count(instance_node) + rt.custAttributes.delete(instance_node, count) + rt.Delete(instance_node) self._remove_instance_from_context(instance) diff --git a/openpype/hosts/max/plugins/create/create_camera.py b/openpype/hosts/max/plugins/create/create_camera.py index 91d0d4d3dc..804d629ec7 100644 --- a/openpype/hosts/max/plugins/create/create_camera.py +++ b/openpype/hosts/max/plugins/create/create_camera.py @@ -1,26 +1,11 @@ # -*- coding: utf-8 -*- """Creator plugin for creating camera.""" from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance class CreateCamera(plugin.MaxCreator): + """Creator plugin for Camera.""" identifier = "io.openpype.creators.max.camera" label = "Camera" family = "camera" icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - from pymxs import runtime as rt - sel_obj = list(rt.selection) - instance = super(CreateCamera, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance - container = rt.getNodeByName(instance.data.get("instance_node")) - # TODO: Disable "Add to Containers?" 
Panel - # parent the selected cameras into the container - for obj in sel_obj: - obj.parent = container - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) diff --git a/openpype/hosts/max/plugins/create/create_maxScene.py b/openpype/hosts/max/plugins/create/create_maxScene.py index 7900336f32..851e26dda2 100644 --- a/openpype/hosts/max/plugins/create/create_maxScene.py +++ b/openpype/hosts/max/plugins/create/create_maxScene.py @@ -1,26 +1,11 @@ # -*- coding: utf-8 -*- """Creator plugin for creating raw max scene.""" from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance class CreateMaxScene(plugin.MaxCreator): + """Creator plugin for 3ds max scenes.""" identifier = "io.openpype.creators.max.maxScene" label = "Max Scene" family = "maxScene" icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - from pymxs import runtime as rt - sel_obj = list(rt.selection) - instance = super(CreateMaxScene, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance - container = rt.getNodeByName(instance.data.get("instance_node")) - # TODO: Disable "Add to Containers?" 
Panel - # parent the selected cameras into the container - for obj in sel_obj: - obj.parent = container - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) diff --git a/openpype/hosts/max/plugins/create/create_model.py b/openpype/hosts/max/plugins/create/create_model.py index e7ae3af9db..fc09d475ef 100644 --- a/openpype/hosts/max/plugins/create/create_model.py +++ b/openpype/hosts/max/plugins/create/create_model.py @@ -1,28 +1,11 @@ # -*- coding: utf-8 -*- """Creator plugin for model.""" from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance class CreateModel(plugin.MaxCreator): + """Creator plugin for Model.""" identifier = "io.openpype.creators.max.model" label = "Model" family = "model" icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - from pymxs import runtime as rt - instance = super(CreateModel, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance - container = rt.getNodeByName(instance.data.get("instance_node")) - # TODO: Disable "Add to Containers?" 
Panel - # parent the selected cameras into the container - sel_obj = None - if self.selected_nodes: - sel_obj = list(self.selected_nodes) - for obj in sel_obj: - obj.parent = container - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) diff --git a/openpype/hosts/max/plugins/create/create_pointcache.py b/openpype/hosts/max/plugins/create/create_pointcache.py index 32f0838471..c2d11f4c32 100644 --- a/openpype/hosts/max/plugins/create/create_pointcache.py +++ b/openpype/hosts/max/plugins/create/create_pointcache.py @@ -1,22 +1,11 @@ # -*- coding: utf-8 -*- """Creator plugin for creating pointcache alembics.""" from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance class CreatePointCache(plugin.MaxCreator): + """Creator plugin for Point caches.""" identifier = "io.openpype.creators.max.pointcache" label = "Point Cache" family = "pointcache" icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - # from pymxs import runtime as rt - - _ = super(CreatePointCache, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance - - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) diff --git a/openpype/hosts/max/plugins/create/create_pointcloud.py b/openpype/hosts/max/plugins/create/create_pointcloud.py index c83acac3df..bc7706069d 100644 --- a/openpype/hosts/max/plugins/create/create_pointcloud.py +++ b/openpype/hosts/max/plugins/create/create_pointcloud.py @@ -1,26 +1,11 @@ # -*- coding: utf-8 -*- """Creator plugin for creating point cloud.""" from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance class CreatePointCloud(plugin.MaxCreator): + """Creator plugin for Point Clouds.""" identifier = "io.openpype.creators.max.pointcloud" label = "Point Cloud" family = "pointcloud" icon = "gear" - - def create(self, subset_name, instance_data, 
pre_create_data): - from pymxs import runtime as rt - sel_obj = list(rt.selection) - instance = super(CreatePointCloud, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance - container = rt.getNodeByName(instance.data.get("instance_node")) - # TODO: Disable "Add to Containers?" Panel - # parent the selected cameras into the container - for obj in sel_obj: - obj.parent = container - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) diff --git a/openpype/hosts/max/plugins/create/create_redshift_proxy.py b/openpype/hosts/max/plugins/create/create_redshift_proxy.py index 698ea82b69..6eb59f0a73 100644 --- a/openpype/hosts/max/plugins/create/create_redshift_proxy.py +++ b/openpype/hosts/max/plugins/create/create_redshift_proxy.py @@ -9,10 +9,3 @@ class CreateRedshiftProxy(plugin.MaxCreator): label = "Redshift Proxy" family = "redshiftproxy" icon = "gear" - - def create(self, subset_name, instance_data, pre_create_data): - - _ = super(CreateRedshiftProxy, self).create( - subset_name, - instance_data, - pre_create_data) # type: CreatedInstance diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py index 5ad895b86e..41e49f4620 100644 --- a/openpype/hosts/max/plugins/create/create_render.py +++ b/openpype/hosts/max/plugins/create/create_render.py @@ -2,11 +2,11 @@ """Creator plugin for creating camera.""" import os from openpype.hosts.max.api import plugin -from openpype.pipeline import CreatedInstance from openpype.hosts.max.api.lib_rendersettings import RenderSettings class CreateRender(plugin.MaxCreator): + """Creator plugin for Renders.""" identifier = "io.openpype.creators.max.render" label = "Render" family = "maxrender" @@ -22,22 +22,10 @@ class CreateRender(plugin.MaxCreator): instance = super(CreateRender, self).create( subset_name, instance_data, - pre_create_data) # type: CreatedInstance + pre_create_data) 
container_name = instance.data.get("instance_node") - container = rt.getNodeByName(container_name) - # TODO: Disable "Add to Containers?" Panel - # parent the selected cameras into the container - for obj in sel_obj: - obj.parent = container - # for additional work on the node: - # instance_node = rt.getNodeByName(instance.get("instance_node")) - - # make sure the render dialog is closed - # for the update of resolution - # Changing the Render Setup dialog settings should be done - # with the actual Render Setup dialog in a closed state. - - # set viewport camera for rendering(mandatory for deadline) - RenderSettings().set_render_camera(sel_obj) + if sel_obj := self.selected_nodes: + # set viewport camera for rendering(mandatory for deadline) + RenderSettings(self.project_settings).set_render_camera(sel_obj) # set output paths for rendering(mandatory for deadline) RenderSettings().render_output(container_name) diff --git a/openpype/hosts/max/plugins/load/load_camera_fbx.py b/openpype/hosts/max/plugins/load/load_camera_fbx.py index 0c5dd762cf..c51900dbb7 100644 --- a/openpype/hosts/max/plugins/load/load_camera_fbx.py +++ b/openpype/hosts/max/plugins/load/load_camera_fbx.py @@ -1,14 +1,12 @@ import os -from openpype.pipeline import ( - load, - get_representation_path -) + +from openpype.hosts.max.api import lib, maintained_selection from openpype.hosts.max.api.pipeline import containerise -from openpype.hosts.max.api import lib +from openpype.pipeline import get_representation_path, load class FbxLoader(load.LoaderPlugin): - """Fbx Loader""" + """Fbx Loader.""" families = ["camera"] representations = ["fbx"] @@ -24,17 +22,17 @@ class FbxLoader(load.LoaderPlugin): rt.FBXImporterSetParam("Camera", True) rt.FBXImporterSetParam("AxisConversionMethod", True) rt.FBXImporterSetParam("Preserveinstances", True) - rt.importFile( + rt.ImportFile( filepath, rt.name("noPrompt"), using=rt.FBXIMP) - container = rt.getNodeByName(f"{name}") + container = rt.GetNodeByName(f"{name}") 
if not container: - container = rt.container() + container = rt.Container() container.name = f"{name}" - for selection in rt.getCurrentSelection(): + for selection in rt.GetCurrentSelection(): selection.Parent = container return containerise( @@ -44,18 +42,33 @@ class FbxLoader(load.LoaderPlugin): from pymxs import runtime as rt path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) + node = rt.GetNodeByName(container["instance_node"]) + rt.Select(node.Children) + fbx_reimport_cmd = ( + f""" - fbx_objects = self.get_container_children(node) - for fbx_object in fbx_objects: - fbx_object.source = path +FBXImporterSetParam "Animation" true +FBXImporterSetParam "Cameras" true +FBXImporterSetParam "AxisConversionMethod" true +FbxExporterSetParam "UpAxis" "Y" +FbxExporterSetParam "Preserveinstances" true + +importFile @"{path}" #noPrompt using:FBXIMP + """) + rt.Execute(fbx_reimport_cmd) + + with maintained_selection(): + rt.Select(node) lib.imprint(container["instance_node"], { "representation": str(representation["_id"]) }) + def switch(self, container, representation): + self.update(container, representation) + def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_max_scene.py b/openpype/hosts/max/plugins/load/load_max_scene.py index 4b19cd671f..e3fb34f5bc 100644 --- a/openpype/hosts/max/plugins/load/load_max_scene.py +++ b/openpype/hosts/max/plugins/load/load_max_scene.py @@ -1,13 +1,12 @@ import os -from openpype.pipeline import ( - load, get_representation_path -) -from openpype.hosts.max.api.pipeline import containerise + from openpype.hosts.max.api import lib +from openpype.hosts.max.api.pipeline import containerise +from openpype.pipeline import get_representation_path, load class MaxSceneLoader(load.LoaderPlugin): - 
"""Max Scene Loader""" + """Max Scene Loader.""" families = ["camera", "maxScene", @@ -23,23 +22,11 @@ class MaxSceneLoader(load.LoaderPlugin): path = os.path.normpath(self.fname) # import the max scene by using "merge file" path = path.replace('\\', '/') - - merge_before = { - c for c in rt.rootNode.Children - if rt.classOf(c) == rt.Container - } - rt.mergeMaxFile(path) - - merge_after = { - c for c in rt.rootNode.Children - if rt.classOf(c) == rt.Container - } - max_containers = merge_after.difference(merge_before) - - if len(max_containers) != 1: - self.log.error("Something failed when loading.") - - max_container = max_containers.pop() + rt.MergeMaxFile(path) + max_objects = rt.getLastMergedNodes() + max_container = rt.Container(name=f"{name}") + for max_object in max_objects: + max_object.Parent = max_container return containerise( name, [max_container], context, loader=self.__class__.__name__) @@ -48,17 +35,27 @@ class MaxSceneLoader(load.LoaderPlugin): from pymxs import runtime as rt path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) - max_objects = node.Children + node_name = container["instance_node"] + + rt.MergeMaxFile(path, + rt.Name("noRedraw"), + rt.Name("deleteOldDups"), + rt.Name("useSceneMtlDups")) + + max_objects = rt.getLastMergedNodes() + container_node = rt.GetNodeByName(node_name) for max_object in max_objects: - max_object.source = path + max_object.Parent = container_node lib.imprint(container["instance_node"], { "representation": str(representation["_id"]) }) + def switch(self, container, representation): + self.update(container, representation) + def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py index 5f1ae3378e..58c6d3c889 100644 
--- a/openpype/hosts/max/plugins/load/load_model.py +++ b/openpype/hosts/max/plugins/load/load_model.py @@ -54,22 +54,22 @@ class ModelAbcLoader(load.LoaderPlugin): from pymxs import runtime as rt path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) - rt.select(node.Children) + node = rt.GetNodeByName(container["instance_node"]) + rt.Select(node.Children) - for alembic in rt.selection: - abc = rt.getNodeByName(alembic.name) - rt.select(abc.Children) - for abc_con in rt.selection: - container = rt.getNodeByName(abc_con.name) + for alembic in rt.Selection: + abc = rt.GetNodeByName(alembic.name) + rt.Select(abc.Children) + for abc_con in rt.Selection: + container = rt.GetNodeByName(abc_con.name) container.source = path - rt.select(container.Children) - for abc_obj in rt.selection: - alembic_obj = rt.getNodeByName(abc_obj.name) + rt.Select(container.Children) + for abc_obj in rt.Selection: + alembic_obj = rt.GetNodeByName(abc_obj.name) alembic_obj.source = path with maintained_selection(): - rt.select(node) + rt.Select(node) lib.imprint( container["instance_node"], @@ -82,8 +82,8 @@ class ModelAbcLoader(load.LoaderPlugin): def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) @staticmethod def get_container_children(parent, type_name): @@ -98,7 +98,7 @@ class ModelAbcLoader(load.LoaderPlugin): filtered = [] for child in list_children(parent): - class_type = str(rt.classOf(child.baseObject)) + class_type = str(rt.ClassOf(child.baseObject)) if class_type == type_name: filtered.append(child) diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/openpype/hosts/max/plugins/load/load_model_fbx.py index 61101c482d..663f79f9f5 100644 --- a/openpype/hosts/max/plugins/load/load_model_fbx.py +++ b/openpype/hosts/max/plugins/load/load_model_fbx.py @@ -6,7 +6,7 @@ from 
openpype.hosts.max.api.lib import maintained_selection class FbxModelLoader(load.LoaderPlugin): - """Fbx Model Loader""" + """Fbx Model Loader.""" families = ["model"] representations = ["fbx"] @@ -23,12 +23,12 @@ class FbxModelLoader(load.LoaderPlugin): rt.FBXImporterSetParam("Preserveinstances", True) rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP) - container = rt.getNodeByName(f"{name}") + container = rt.GetNodeByName(name) if not container: - container = rt.container() - container.name = f"{name}" + container = rt.Container() + container.name = name - for selection in rt.getCurrentSelection(): + for selection in rt.GetCurrentSelection(): selection.Parent = container return containerise( @@ -37,7 +37,6 @@ class FbxModelLoader(load.LoaderPlugin): def update(self, container, representation): from pymxs import runtime as rt - path = get_representation_path(representation) node = rt.getNodeByName(container["instance_node"]) rt.select(node.Children) @@ -50,7 +49,7 @@ class FbxModelLoader(load.LoaderPlugin): rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP) with maintained_selection(): - rt.select(node) + rt.Select(node) lib.imprint( container["instance_node"], @@ -63,5 +62,5 @@ class FbxModelLoader(load.LoaderPlugin): def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model_obj.py b/openpype/hosts/max/plugins/load/load_model_obj.py index c55e462111..77d4e08cfb 100644 --- a/openpype/hosts/max/plugins/load/load_model_obj.py +++ b/openpype/hosts/max/plugins/load/load_model_obj.py @@ -1,15 +1,13 @@ import os -from openpype.pipeline import ( - load, - get_representation_path -) -from openpype.hosts.max.api.pipeline import containerise + from openpype.hosts.max.api import lib from openpype.hosts.max.api.lib import maintained_selection +from 
openpype.hosts.max.api.pipeline import containerise +from openpype.pipeline import get_representation_path, load class ObjLoader(load.LoaderPlugin): - """Obj Loader""" + """Obj Loader.""" families = ["model"] representations = ["obj"] @@ -21,18 +19,18 @@ class ObjLoader(load.LoaderPlugin): from pymxs import runtime as rt filepath = os.path.normpath(self.fname) - self.log.debug(f"Executing command to import..") + self.log.debug("Executing command to import..") - rt.execute(f'importFile @"{filepath}" #noPrompt using:ObjImp') + rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp') # create "missing" container for obj import - container = rt.container() - container.name = f"{name}" + container = rt.Container() + container.name = name # get current selection - for selection in rt.getCurrentSelection(): + for selection in rt.GetCurrentSelection(): selection.Parent = container - asset = rt.getNodeByName(f"{name}") + asset = rt.GetNodeByName(name) return containerise( name, [asset], context, loader=self.__class__.__name__) @@ -42,27 +40,30 @@ class ObjLoader(load.LoaderPlugin): path = get_representation_path(representation) node_name = container["instance_node"] - node = rt.getNodeByName(node_name) + node = rt.GetNodeByName(node_name) instance_name, _ = node_name.split("_") - container = rt.getNodeByName(instance_name) - for n in container.Children: - rt.delete(n) + container = rt.GetNodeByName(instance_name) + for child in container.Children: + rt.Delete(child) - rt.execute(f'importFile @"{path}" #noPrompt using:ObjImp') + rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp') # get current selection - for selection in rt.getCurrentSelection(): + for selection in rt.GetCurrentSelection(): selection.Parent = container with maintained_selection(): - rt.select(node) + rt.Select(node) lib.imprint(node_name, { "representation": str(representation["_id"]) }) + def switch(self, container, representation): + self.update(container, representation) + def remove(self, 
container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model_usd.py b/openpype/hosts/max/plugins/load/load_model_usd.py index 143f91f40b..2b34669278 100644 --- a/openpype/hosts/max/plugins/load/load_model_usd.py +++ b/openpype/hosts/max/plugins/load/load_model_usd.py @@ -1,10 +1,9 @@ import os -from openpype.pipeline import ( - load, get_representation_path -) -from openpype.hosts.max.api.pipeline import containerise + from openpype.hosts.max.api import lib from openpype.hosts.max.api.lib import maintained_selection +from openpype.hosts.max.api.pipeline import containerise +from openpype.pipeline import get_representation_path, load class ModelUSDLoader(load.LoaderPlugin): @@ -19,6 +18,7 @@ class ModelUSDLoader(load.LoaderPlugin): def load(self, context, name=None, namespace=None, data=None): from pymxs import runtime as rt + # asset_filepath filepath = os.path.normpath(self.fname) import_options = rt.USDImporter.CreateOptions() @@ -27,11 +27,11 @@ class ModelUSDLoader(load.LoaderPlugin): log_filepath = filepath.replace(ext, "txt") rt.LogPath = log_filepath - rt.LogLevel = rt.name('info') + rt.LogLevel = rt.Name("info") rt.USDImporter.importFile(filepath, importOptions=import_options) - asset = rt.getNodeByName(f"{name}") + asset = rt.GetNodeByName(name) return containerise( name, [asset], context, loader=self.__class__.__name__) @@ -41,11 +41,11 @@ class ModelUSDLoader(load.LoaderPlugin): path = get_representation_path(representation) node_name = container["instance_node"] - node = rt.getNodeByName(node_name) + node = rt.GetNodeByName(node_name) for n in node.Children: for r in n.Children: - rt.delete(r) - rt.delete(n) + rt.Delete(r) + rt.Delete(n) instance_name, _ = node_name.split("_") import_options = rt.USDImporter.CreateOptions() @@ -54,15 +54,15 @@ class 
ModelUSDLoader(load.LoaderPlugin): log_filepath = path.replace(ext, "txt") rt.LogPath = log_filepath - rt.LogLevel = rt.name('info') + rt.LogLevel = rt.Name("info") rt.USDImporter.importFile(path, importOptions=import_options) - asset = rt.getNodeByName(f"{instance_name}") + asset = rt.GetNodeByName(instance_name) asset.Parent = node with maintained_selection(): - rt.select(node) + rt.Select(node) lib.imprint(node_name, { "representation": str(representation["_id"]) @@ -74,5 +74,5 @@ class ModelUSDLoader(load.LoaderPlugin): def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py b/openpype/hosts/max/plugins/load/load_pointcache.py index 5fb9772f87..cadbe7cac2 100644 --- a/openpype/hosts/max/plugins/load/load_pointcache.py +++ b/openpype/hosts/max/plugins/load/load_pointcache.py @@ -6,8 +6,8 @@ Because of limited api, alembics can be only loaded, but not easily updated. 
""" import os from openpype.pipeline import load, get_representation_path +from openpype.hosts.max.api import lib, maintained_selection from openpype.hosts.max.api.pipeline import containerise -from openpype.hosts.max.api import lib class AbcLoader(load.LoaderPlugin): @@ -48,6 +48,10 @@ class AbcLoader(load.LoaderPlugin): abc_container = abc_containers.pop() + for abc in rt.GetCurrentSelection(): + for cam_shape in abc.Children: + cam_shape.playbackType = 2 + return containerise( name, [abc_container], context, loader=self.__class__.__name__ ) @@ -56,7 +60,7 @@ class AbcLoader(load.LoaderPlugin): from pymxs import runtime as rt path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) + node = rt.GetNodeByName(container["instance_node"]) alembic_objects = self.get_container_children(node, "AlembicObject") for alembic_object in alembic_objects: @@ -67,14 +71,28 @@ class AbcLoader(load.LoaderPlugin): {"representation": str(representation["_id"])}, ) + with maintained_selection(): + rt.Select(node.Children) + + for alembic in rt.Selection: + abc = rt.GetNodeByName(alembic.name) + rt.Select(abc.Children) + for abc_con in rt.Selection: + container = rt.GetNodeByName(abc_con.name) + container.source = path + rt.Select(container.Children) + for abc_obj in rt.Selection: + alembic_obj = rt.GetNodeByName(abc_obj.name) + alembic_obj.source = path + def switch(self, container, representation): self.update(container, representation) def remove(self, container): from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) @staticmethod def get_container_children(parent, type_name): diff --git a/openpype/hosts/max/plugins/load/load_pointcloud.py b/openpype/hosts/max/plugins/load/load_pointcloud.py index 27bc88b4f3..8634e1d51f 100644 --- a/openpype/hosts/max/plugins/load/load_pointcloud.py +++ 
b/openpype/hosts/max/plugins/load/load_pointcloud.py @@ -1,13 +1,12 @@ import os -from openpype.pipeline import ( - load, get_representation_path -) + +from openpype.hosts.max.api import lib, maintained_selection from openpype.hosts.max.api.pipeline import containerise -from openpype.hosts.max.api import lib +from openpype.pipeline import get_representation_path, load class PointCloudLoader(load.LoaderPlugin): - """Point Cloud Loader""" + """Point Cloud Loader.""" families = ["pointcloud"] representations = ["prt"] @@ -23,7 +22,7 @@ class PointCloudLoader(load.LoaderPlugin): obj = rt.tyCache() obj.filename = filepath - prt_container = rt.getNodeByName(f"{obj.name}") + prt_container = rt.GetNodeByName(obj.name) return containerise( name, [prt_container], context, loader=self.__class__.__name__) @@ -33,19 +32,23 @@ class PointCloudLoader(load.LoaderPlugin): from pymxs import runtime as rt path = get_representation_path(representation) - node = rt.getNodeByName(container["instance_node"]) + node = rt.GetNodeByName(container["instance_node"]) + with maintained_selection(): + rt.Select(node.Children) + for prt in rt.Selection: + prt_object = rt.GetNodeByName(prt.name) + prt_object.filename = path - prt_objects = self.get_container_children(node) - for prt_object in prt_objects: - prt_object.source = path + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) + def switch(self, container, representation): + self.update(container, representation) def remove(self, container): """remove the container""" from pymxs import runtime as rt - node = rt.getNodeByName(container["instance_node"]) - rt.delete(node) + node = rt.GetNodeByName(container["instance_node"]) + rt.Delete(node) diff --git a/openpype/hosts/max/plugins/publish/collect_members.py b/openpype/hosts/max/plugins/publish/collect_members.py new file mode 100644 index 
0000000000..812d82ff26 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/collect_members.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +"""Collect instance members.""" +import pyblish.api +from pymxs import runtime as rt + + +class CollectMembers(pyblish.api.InstancePlugin): + """Collect Set Members.""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect Instance Members" + hosts = ['max'] + + def process(self, instance): + + if instance.data.get("instance_node"): + container = rt.GetNodeByName(instance.data["instance_node"]) + instance.data["members"] = [ + member.node for member + in container.openPypeData.all_handles + ] + self.log.debug("{}".format(instance.data["members"])) diff --git a/openpype/hosts/max/plugins/publish/extract_camera_abc.py b/openpype/hosts/max/plugins/publish/extract_camera_abc.py index 6b3bb178a3..b42732e70d 100644 --- a/openpype/hosts/max/plugins/publish/extract_camera_abc.py +++ b/openpype/hosts/max/plugins/publish/extract_camera_abc.py @@ -1,14 +1,14 @@ import os + import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children + +from openpype.hosts.max.api import maintained_selection +from openpype.pipeline import OptionalPyblishPluginMixin, publish class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Camera with AlembicExport - """ + """Extract Camera with AlembicExport.""" order = pyblish.api.ExtractorOrder - 0.1 label = "Extract Alembic Camera" @@ -31,20 +31,21 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin): path = os.path.join(stagingdir, filename) # We run the render - self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir)) + self.log.info(f"Writing alembic '{filename}' to '{stagingdir}'") - rt.AlembicExport.ArchiveType = rt.name("ogawa") - rt.AlembicExport.CoordinateSystem = rt.name("maya") +
rt.AlembicExport.ArchiveType = rt.Name("ogawa") + rt.AlembicExport.CoordinateSystem = rt.Name("maya") rt.AlembicExport.StartFrame = start rt.AlembicExport.EndFrame = end rt.AlembicExport.CustomAttributes = True with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) - rt.exportFile( + node_list = instance.data["members"] + rt.Select(node_list) + rt.ExportFile( path, - rt.name("noPrompt"), + rt.Name("noPrompt"), selectedOnly=True, using=rt.AlembicExport, ) @@ -58,6 +59,8 @@ class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin): "ext": "abc", "files": filename, "stagingDir": stagingdir, + "frameStart": start, + "frameEnd": end, } instance.data["representations"].append(representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) + self.log.info(f"Extracted instance '{instance.name}' to: {path}") diff --git a/openpype/hosts/max/plugins/publish/extract_camera_fbx.py b/openpype/hosts/max/plugins/publish/extract_camera_fbx.py index 4b4b349e19..06ac3da093 100644 --- a/openpype/hosts/max/plugins/publish/extract_camera_fbx.py +++ b/openpype/hosts/max/plugins/publish/extract_camera_fbx.py @@ -1,14 +1,14 @@ import os + import pyblish.api -from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children + +from openpype.hosts.max.api import maintained_selection +from openpype.pipeline import OptionalPyblishPluginMixin, publish class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Camera with FbxExporter - """ + """Extract Camera with FbxExporter.""" order = pyblish.api.ExtractorOrder - 0.2 label = "Extract Fbx Camera" @@ -26,7 +26,7 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin): filename = "{name}.fbx".format(**instance.data) filepath = os.path.join(stagingdir, filename) - self.log.info("Writing fbx file '%s' to 
'%s'" % (filename, filepath)) + self.log.info(f"Writing fbx file '{filename}' to '{filepath}'") rt.FBXExporterSetParam("Animation", True) rt.FBXExporterSetParam("Cameras", True) @@ -36,10 +36,11 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin): with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) - rt.exportFile( + node_list = instance.data["members"] + rt.Select(node_list) + rt.ExportFile( filepath, - rt.name("noPrompt"), + rt.Name("noPrompt"), selectedOnly=True, using=rt.FBXEXP, ) @@ -55,6 +56,4 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin): "stagingDir": stagingdir, } instance.data["representations"].append(representation) - self.log.info( - "Extracted instance '%s' to: %s" % (instance.name, filepath) - ) + self.log.info(f"Extracted instance '{instance.name}' to: {filepath}") diff --git a/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py b/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py index f0c2aff7f3..de5db9ab56 100644 --- a/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py +++ b/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py @@ -2,7 +2,6 @@ import os import pyblish.api from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import get_all_children class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin): @@ -33,7 +32,7 @@ class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin): if "representations" not in instance.data: instance.data["representations"] = [] - nodes = get_all_children(rt.getNodeByName(container)) + nodes = instance.data["members"] rt.saveNodes(nodes, max_path, quiet=True) self.log.info("Performing Extraction ...") diff --git a/openpype/hosts/max/plugins/publish/extract_model.py b/openpype/hosts/max/plugins/publish/extract_model.py index 4c7c98e2cc..c7ecf7efc9 100644 ---
a/openpype/hosts/max/plugins/publish/extract_model.py +++ b/openpype/hosts/max/plugins/publish/extract_model.py @@ -2,7 +2,7 @@ import os import pyblish.api from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children +from openpype.hosts.max.api import maintained_selection class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin): @@ -40,7 +40,8 @@ class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin): with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) + node_list = instance.data["members"] + rt.Select(node_list) rt.exportFile( filepath, rt.name("noPrompt"), diff --git a/openpype/hosts/max/plugins/publish/extract_model_fbx.py b/openpype/hosts/max/plugins/publish/extract_model_fbx.py index e6ccb24cdd..56c2cadd94 100644 --- a/openpype/hosts/max/plugins/publish/extract_model_fbx.py +++ b/openpype/hosts/max/plugins/publish/extract_model_fbx.py @@ -2,7 +2,7 @@ import os import pyblish.api from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children +from openpype.hosts.max.api import maintained_selection class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): @@ -22,6 +22,7 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): container = instance.data["instance_node"] + self.log.info("Extracting Geometry ...") stagingdir = self.staging_dir(instance) @@ -39,7 +40,8 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) + node_list = instance.data["members"] + rt.Select(node_list) rt.exportFile( filepath, rt.name("noPrompt"), diff --git a/openpype/hosts/max/plugins/publish/extract_model_obj.py 
b/openpype/hosts/max/plugins/publish/extract_model_obj.py index ed3d68c990..4fde65cf22 100644 --- a/openpype/hosts/max/plugins/publish/extract_model_obj.py +++ b/openpype/hosts/max/plugins/publish/extract_model_obj.py @@ -2,7 +2,7 @@ import os import pyblish.api from openpype.pipeline import publish, OptionalPyblishPluginMixin from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children +from openpype.hosts.max.api import maintained_selection class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin): @@ -31,7 +31,8 @@ class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin): with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) + node_list = instance.data["members"] + rt.Select(node_list) rt.exportFile( filepath, rt.name("noPrompt"), diff --git a/openpype/hosts/max/plugins/publish/extract_model_usd.py b/openpype/hosts/max/plugins/publish/extract_model_usd.py index 0bed2d855e..da37c77bf7 100644 --- a/openpype/hosts/max/plugins/publish/extract_model_usd.py +++ b/openpype/hosts/max/plugins/publish/extract_model_usd.py @@ -1,20 +1,15 @@ import os + import pyblish.api -from openpype.pipeline import ( - publish, - OptionalPyblishPluginMixin -) from pymxs import runtime as rt -from openpype.hosts.max.api import ( - maintained_selection -) + +from openpype.hosts.max.api import maintained_selection +from openpype.pipeline import OptionalPyblishPluginMixin, publish class ExtractModelUSD(publish.Extractor, OptionalPyblishPluginMixin): - """ - Extract Geometry in USDA Format - """ + """Extract Geometry in USDA Format.""" order = pyblish.api.ExtractorOrder - 0.05 label = "Extract Geometry (USD)" @@ -26,31 +21,28 @@ class ExtractModelUSD(publish.Extractor, if not self.is_active(instance.data): return - container = instance.data["instance_node"] - self.log.info("Extracting Geometry ...") stagingdir = self.staging_dir(instance) asset_filename = 
"{name}.usda".format(**instance.data) asset_filepath = os.path.join(stagingdir, asset_filename) - self.log.info("Writing USD '%s' to '%s'" % (asset_filepath, - stagingdir)) + self.log.info(f"Writing USD '{asset_filepath}' to '{stagingdir}'") log_filename = "{name}.txt".format(**instance.data) log_filepath = os.path.join(stagingdir, log_filename) - self.log.info("Writing log '%s' to '%s'" % (log_filepath, - stagingdir)) + self.log.info(f"Writing log '{log_filepath}' to '{stagingdir}'") # get the nodes which need to be exported export_options = self.get_export_options(log_filepath) with maintained_selection(): # select and export - node_list = self.get_node_list(container) + node_list = instance.data["members"] + rt.Select(node_list) rt.USDExporter.ExportFile(asset_filepath, exportOptions=export_options, - contentSource=rt.name("selected"), + contentSource=rt.Name("selected"), nodeList=node_list) self.log.info("Performing Extraction ...") @@ -73,25 +65,11 @@ class ExtractModelUSD(publish.Extractor, } instance.data["representations"].append(log_representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, - asset_filepath)) + self.log.info( + f"Extracted instance '{instance.name}' to: {asset_filepath}") - def get_node_list(self, container): - """ - Get the target nodes which are - the children of the container - """ - node_list = [] - - container_node = rt.getNodeByName(container) - target_node = container_node.Children - rt.select(target_node) - for sel in rt.selection: - node_list.append(sel) - - return node_list - - def get_export_options(self, log_path): + @staticmethod + def get_export_options(log_path): """Set Export Options for USD Exporter""" export_options = rt.USDExporter.createOptions() @@ -101,13 +79,13 @@ class ExtractModelUSD(publish.Extractor, export_options.Lights = False export_options.Cameras = False export_options.Materials = False - export_options.MeshFormat = rt.name('fromScene') - export_options.FileFormat = rt.name('ascii') 
- export_options.UpAxis = rt.name('y') - export_options.LogLevel = rt.name('info') + export_options.MeshFormat = rt.Name('fromScene') + export_options.FileFormat = rt.Name('ascii') + export_options.UpAxis = rt.Name('y') + export_options.LogLevel = rt.Name('info') export_options.LogPath = log_path export_options.PreserveEdgeOrientation = True - export_options.TimeMode = rt.name('current') + export_options.TimeMode = rt.Name('current') rt.USDexporter.UIOptions = export_options diff --git a/openpype/hosts/max/plugins/publish/extract_pointcache.py b/openpype/hosts/max/plugins/publish/extract_pointcache.py index 8658cecb1b..6d1e8d03b4 100644 --- a/openpype/hosts/max/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/max/plugins/publish/extract_pointcache.py @@ -41,7 +41,7 @@ import os import pyblish.api from openpype.pipeline import publish from pymxs import runtime as rt -from openpype.hosts.max.api import maintained_selection, get_all_children +from openpype.hosts.max.api import maintained_selection class ExtractAlembic(publish.Extractor): @@ -72,7 +72,8 @@ class ExtractAlembic(publish.Extractor): with maintained_selection(): # select and export - rt.select(get_all_children(rt.getNodeByName(container))) + node_list = instance.data["members"] + rt.Select(node_list) rt.exportFile( path, rt.name("noPrompt"), diff --git a/openpype/hosts/max/plugins/publish/extract_pointcloud.py b/openpype/hosts/max/plugins/publish/extract_pointcloud.py index e8d58ab713..583bbb6dbd 100644 --- a/openpype/hosts/max/plugins/publish/extract_pointcloud.py +++ b/openpype/hosts/max/plugins/publish/extract_pointcloud.py @@ -1,42 +1,34 @@ import os + import pyblish.api -from openpype.pipeline import publish from pymxs import runtime as rt -from openpype.hosts.max.api import ( - maintained_selection -) -from openpype.settings import get_project_settings -from openpype.pipeline import legacy_io - -def get_setting(project_setting=None): - project_setting = get_project_settings( - 
legacy_io.Session["AVALON_PROJECT"] - ) - return (project_setting["max"]["PointCloud"]) +from openpype.hosts.max.api import maintained_selection +from openpype.pipeline import publish class ExtractPointCloud(publish.Extractor): """ - Extract PRT format with tyFlow operators + Extract PRT format with tyFlow operators. Notes: Currently only works for the default partition setting Args: - export_particle(): sets up all job arguments for attributes - to be exported in MAXscript + self.export_particle(): sets up all job arguments for attributes + to be exported in MAXscript - get_operators(): get the export_particle operator + self.get_operators(): get the export_particle operator - get_custom_attr(): get all custom channel attributes from Openpype - setting and sets it as job arguments before exporting + self.get_custom_attr(): get all custom channel attributes from Openpype + setting and sets it as job arguments before exporting - get_files(): get the files with tyFlow naming convention - before publishing + self.get_files(): get the files with tyFlow naming convention + before publishing - partition_output_name(): get the naming with partition settings. - get_partition(): get partition value + self.partition_output_name(): get the naming with partition settings. 
+ + self.get_partition(): get partition value """ @@ -46,9 +38,9 @@ class ExtractPointCloud(publish.Extractor): families = ["pointcloud"] def process(self, instance): + self.settings = self.get_setting(instance) start = int(instance.context.data.get("frameStart")) end = int(instance.context.data.get("frameEnd")) - container = instance.data["instance_node"] self.log.info("Extracting PRT...") stagingdir = self.staging_dir(instance) @@ -56,22 +48,25 @@ class ExtractPointCloud(publish.Extractor): path = os.path.join(stagingdir, filename) with maintained_selection(): - job_args = self.export_particle(container, + job_args = self.export_particle(instance.data["members"], start, end, path) + for job in job_args: - rt.execute(job) + rt.Execute(job) self.log.info("Performing Extraction ...") if "representations" not in instance.data: instance.data["representations"] = [] self.log.info("Writing PRT with TyFlow Plugin...") - filenames = self.get_files(container, path, start, end) - self.log.debug("filenames: {0}".format(filenames)) + filenames = self.get_files( + instance.data["members"], path, start, end) + self.log.debug(f"filenames: {filenames}") - partition = self.partition_output_name(container) + partition = self.partition_output_name( + instance.data["members"]) representation = { 'name': 'prt', @@ -81,67 +76,84 @@ class ExtractPointCloud(publish.Extractor): "outputName": partition # partition value } instance.data["representations"].append(representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, - path)) + self.log.info(f"Extracted instance '{instance.name}' to: {path}") def export_particle(self, - container, + members, start, end, filepath): + """Sets up all job arguments for attributes. + + Those attributes are to be exported in MAX Script. + + Args: + members (list): Member nodes of the instance. + start (int): Start frame. + end (int): End frame. + filepath (str): Path to PRT file. + + Returns: + list of arguments for MAX Script. 
+ + """ job_args = [] - opt_list = self.get_operators(container) + opt_list = self.get_operators(members) for operator in opt_list: - start_frame = "{0}.frameStart={1}".format(operator, - start) + start_frame = f"{operator}.frameStart={start}" job_args.append(start_frame) - end_frame = "{0}.frameEnd={1}".format(operator, - end) + end_frame = f"{operator}.frameEnd={end}" job_args.append(end_frame) filepath = filepath.replace("\\", "/") - prt_filename = '{0}.PRTFilename="{1}"'.format(operator, - filepath) - + prt_filename = f'{operator}.PRTFilename="{filepath}"' job_args.append(prt_filename) # Partition - mode = "{0}.PRTPartitionsMode=2".format(operator) + mode = f"{operator}.PRTPartitionsMode=2" job_args.append(mode) additional_args = self.get_custom_attr(operator) - for args in additional_args: - job_args.append(args) - - prt_export = "{0}.exportPRT()".format(operator) + job_args.extend(iter(additional_args)) + prt_export = f"{operator}.exportPRT()" job_args.append(prt_export) return job_args - def get_operators(self, container): - """Get Export Particles Operator""" + @staticmethod + def get_operators(members): + """Get Export Particles Operator. + Args: + members (list): Instance members. 
+ + Returns: + list of particle operators + + """ opt_list = [] - node = rt.getNodebyName(container) - selection_list = list(node.Children) - for sel in selection_list: - obj = sel.baseobject - # TODO: to see if it can be used maxscript instead - anim_names = rt.getsubanimnames(obj) + for member in members: + obj = member.baseobject + # TODO: to see if it can be used maxscript instead + anim_names = rt.GetSubAnimNames(obj) for anim_name in anim_names: - sub_anim = rt.getsubanim(obj, anim_name) - boolean = rt.isProperty(sub_anim, "Export_Particles") - event_name = sub_anim.name + sub_anim = rt.GetSubAnim(obj, anim_name) + boolean = rt.IsProperty(sub_anim, "Export_Particles") if boolean: - opt = "${0}.{1}.export_particles".format(sel.name, - event_name) - opt_list.append(opt) + event_name = sub_anim.Name + opt = f"${member.Name}.{event_name}.export_particles" + opt_list.append(opt) return opt_list + @staticmethod + def get_setting(instance): + project_setting = instance.context.data["project_settings"] + return project_setting["max"]["PointCloud"] + def get_custom_attr(self, operator): """Get Custom Attributes""" custom_attr_list = [] - attr_settings = get_setting()["attribute"] + attr_settings = self.settings["attribute"] for key, value in attr_settings.items(): custom_attr = "{0}.PRTChannels_{1}=True".format(operator, value) @@ -157,14 +169,25 @@ class ExtractPointCloud(publish.Extractor): path, start_frame, end_frame): - """ - Note: - Set the filenames accordingly to the tyFlow file - naming extension for the publishing purpose + """Get file names for tyFlow. - Actual File Output from tyFlow: + Set the filenames accordingly to the tyFlow file + naming extension for the publishing purpose + + Actual File Output from tyFlow:: __partof..prt + e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt + + Args: + container: Instance node. + path (str): Output directory. + start_frame (int): Start frame. + end_frame (int): End frame. 
+ + Returns: + list of filenames + """ filenames = [] filename = os.path.basename(path) @@ -181,27 +204,36 @@ class ExtractPointCloud(publish.Extractor): return filenames def partition_output_name(self, container): - """ - Notes: - Partition output name set for mapping - the published file output + """Get partition output name. + + Partition output name set for mapping + the published file output. + + Todo: + Customizes the setting for the output. + + Args: + container: Instance node. + + Returns: + str: Partition name. - todo: - Customizes the setting for the output """ partition_count, partition_start = self.get_partition(container) - partition = "_part{:03}of{}".format(partition_start, - partition_count) - - return partition + return f"_part{partition_start:03}of{partition_count}" def get_partition(self, container): - """ - Get Partition Value + """Get Partition value. + + Args: + container: Instance node. + """ opt_list = self.get_operators(container) + # TODO: This looks strange? Iterating over + # the opt_list but returning from inside? 
for operator in opt_list: - count = rt.execute(f'{operator}.PRTPartitionsCount') - start = rt.execute(f'{operator}.PRTPartitionsFrom') + count = rt.Execute(f'{operator}.PRTPartitionsCount') + start = rt.Execute(f'{operator}.PRTPartitionsFrom') return count, start diff --git a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py index 3b44099609..ab569ecbcb 100644 --- a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py @@ -30,8 +30,8 @@ class ExtractRedshiftProxy(publish.Extractor): with maintained_selection(): # select and export - con = rt.getNodeByName(container) - rt.select(con.Children) + node_list = instance.data["members"] + rt.Select(node_list) # Redshift rsProxy command # rsProxy fp selected compress connectivity startFrame endFrame # camera warnExisting transformPivotToOrigin diff --git a/openpype/hosts/max/plugins/publish/validate_camera_contents.py b/openpype/hosts/max/plugins/publish/validate_camera_contents.py index c81e28a61f..85be5d59fa 100644 --- a/openpype/hosts/max/plugins/publish/validate_camera_contents.py +++ b/openpype/hosts/max/plugins/publish/validate_camera_contents.py @@ -18,30 +18,24 @@ class ValidateCameraContent(pyblish.api.InstancePlugin): "$Physical_Camera", "$Target"] def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Camera instance must only include" - "camera (and camera target)") + if invalid := self.get_invalid(instance): # noqa + raise PublishValidationError(("Camera instance must only include" + "camera (and camera target). 
" + f"Invalid content {invalid}")) def get_invalid(self, instance): """ Get invalid nodes if the instance is not camera """ - invalid = list() + invalid = [] container = instance.data["instance_node"] - self.log.info("Validating look content for " - "{}".format(container)) + self.log.info(f"Validating camera content for {container}") - con = rt.getNodeByName(container) - selection_list = list(con.Children) + selection_list = instance.data["members"] for sel in selection_list: # to avoid Attribute Error from pymxs wrapper sel_tmp = str(sel) - found = False - for cam in self.camera_type: - if sel_tmp.startswith(cam): - found = True - break + found = any(sel_tmp.startswith(cam) for cam in self.camera_type) if not found: self.log.error("Camera not found") invalid.append(sel) diff --git a/openpype/hosts/max/plugins/publish/validate_model_contents.py b/openpype/hosts/max/plugins/publish/validate_model_contents.py index dd782674ff..1ec08d9c5f 100644 --- a/openpype/hosts/max/plugins/publish/validate_model_contents.py +++ b/openpype/hosts/max/plugins/publish/validate_model_contents.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- import pyblish.api -from openpype.pipeline import PublishValidationError from pymxs import runtime as rt +from openpype.pipeline import PublishValidationError + class ValidateModelContent(pyblish.api.InstancePlugin): """Validates Model instance contents. @@ -17,28 +18,26 @@ class ValidateModelContent(pyblish.api.InstancePlugin): label = "Model Contents" def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError("Model instance must only include" - "Geometry and Editable Mesh") + if invalid := self.get_invalid(instance): # noqa + raise PublishValidationError(("Model instance must only include" + "Geometry and Editable Mesh. 
" + f"Invalid types on: {invalid}")) def get_invalid(self, instance): """ Get invalid nodes if the instance is not camera """ - invalid = list() + invalid = [] container = instance.data["instance_node"] - self.log.info("Validating look content for " - "{}".format(container)) + self.log.info(f"Validating model content for {container}") - con = rt.getNodeByName(container) - selection_list = list(con.Children) or rt.getCurrentSelection() + selection_list = instance.data["members"] for sel in selection_list: - if rt.classOf(sel) in rt.Camera.classes: + if rt.ClassOf(sel) in rt.Camera.classes: invalid.append(sel) - if rt.classOf(sel) in rt.Light.classes: + if rt.ClassOf(sel) in rt.Light.classes: invalid.append(sel) - if rt.classOf(sel) in rt.Shape.classes: + if rt.ClassOf(sel) in rt.Shape.classes: invalid.append(sel) return invalid diff --git a/openpype/hosts/max/plugins/publish/validate_no_max_content.py b/openpype/hosts/max/plugins/publish/validate_no_max_content.py index c20a1968ed..ba4a6882c2 100644 --- a/openpype/hosts/max/plugins/publish/validate_no_max_content.py +++ b/openpype/hosts/max/plugins/publish/validate_no_max_content.py @@ -18,6 +18,5 @@ class ValidateMaxContents(pyblish.api.InstancePlugin): label = "Max Scene Contents" def process(self, instance): - container = rt.getNodeByName(instance.data["instance_node"]) - if not list(container.Children): + if not instance.data["members"]: raise PublishValidationError("No content found in the container") diff --git a/openpype/hosts/max/plugins/publish/validate_pointcloud.py b/openpype/hosts/max/plugins/publish/validate_pointcloud.py index f654058648..e1c2151c9d 100644 --- a/openpype/hosts/max/plugins/publish/validate_pointcloud.py +++ b/openpype/hosts/max/plugins/publish/validate_pointcloud.py @@ -9,11 +9,11 @@ def get_setting(project_setting=None): project_setting = get_project_settings( legacy_io.Session["AVALON_PROJECT"] ) - return (project_setting["max"]["PointCloud"]) + return 
project_setting["max"]["PointCloud"] class ValidatePointCloud(pyblish.api.InstancePlugin): - """Validate that workfile was saved.""" + """Validate that work file was saved.""" order = pyblish.api.ValidatorOrder families = ["pointcloud"] @@ -34,39 +34,37 @@ class ValidatePointCloud(pyblish.api.InstancePlugin): of export_particle operator """ - invalid = self.get_tyFlow_object(instance) - if invalid: - raise PublishValidationError("Non tyFlow object " - "found: {}".format(invalid)) - invalid = self.get_tyFlow_operator(instance) - if invalid: - raise PublishValidationError("tyFlow ExportParticle operator " - "not found: {}".format(invalid)) + report = [] + if invalid := self.get_tyflow_object(instance): # noqa + report.append(f"Non tyFlow object found: {invalid}") - invalid = self.validate_export_mode(instance) - if invalid: - raise PublishValidationError("The export mode is not at PRT") + if invalid := self.get_tyflow_operator(instance): # noqa + report.append( + f"tyFlow ExportParticle operator not found: {invalid}") - invalid = self.validate_partition_value(instance) - if invalid: - raise PublishValidationError("tyFlow Partition setting is " - "not at the default value") - invalid = self.validate_custom_attribute(instance) - if invalid: - raise PublishValidationError("Custom Attribute not found " - ":{}".format(invalid)) + if self.validate_export_mode(instance): + report.append("The export mode is not at PRT") - def get_tyFlow_object(self, instance): + if self.validate_partition_value(instance): + report.append(("tyFlow Partition setting is " + "not at the default value")) + + if invalid := self.validate_custom_attribute(instance): # noqa + report.append(("Custom Attribute not found " + f":{invalid}")) + + if report: + raise PublishValidationError(f"{report}") + + def get_tyflow_object(self, instance): invalid = [] container = instance.data["instance_node"] - self.log.info("Validating tyFlow container " - "for {}".format(container)) + self.log.info(f"Validating 
tyFlow container for {container}") - con = rt.getNodeByName(container) - selection_list = list(con.Children) + selection_list = instance.data["members"] for sel in selection_list: sel_tmp = str(sel) - if rt.classOf(sel) in [rt.tyFlow, + if rt.ClassOf(sel) in [rt.tyFlow, rt.Editable_Mesh]: if "tyFlow" not in sel_tmp: invalid.append(sel) @@ -75,23 +73,20 @@ class ValidatePointCloud(pyblish.api.InstancePlugin): return invalid - def get_tyFlow_operator(self, instance): + def get_tyflow_operator(self, instance): invalid = [] container = instance.data["instance_node"] - self.log.info("Validating tyFlow object " - "for {}".format(container)) - - con = rt.getNodeByName(container) - selection_list = list(con.Children) + self.log.info(f"Validating tyFlow object for {container}") + selection_list = instance.data["members"] bool_list = [] for sel in selection_list: obj = sel.baseobject - anim_names = rt.getsubanimnames(obj) + anim_names = rt.GetSubAnimNames(obj) for anim_name in anim_names: # get all the names of the related tyFlow nodes - sub_anim = rt.getsubanim(obj, anim_name) + sub_anim = rt.GetSubAnim(obj, anim_name) # check if there is export particle operator - boolean = rt.isProperty(sub_anim, "Export_Particles") + boolean = rt.IsProperty(sub_anim, "Export_Particles") bool_list.append(str(boolean)) # if the export_particles property is not there # it means there is not a "Export Particle" operator @@ -104,21 +99,18 @@ class ValidatePointCloud(pyblish.api.InstancePlugin): def validate_custom_attribute(self, instance): invalid = [] container = instance.data["instance_node"] - self.log.info("Validating tyFlow custom " - "attributes for {}".format(container)) + self.log.info( + f"Validating tyFlow custom attributes for {container}") - con = rt.getNodeByName(container) - selection_list = list(con.Children) + selection_list = instance.data["members"] for sel in selection_list: obj = sel.baseobject - anim_names = rt.getsubanimnames(obj) + anim_names = rt.GetSubAnimNames(obj) 
for anim_name in anim_names: # get all the names of the related tyFlow nodes - sub_anim = rt.getsubanim(obj, anim_name) - # check if there is export particle operator - boolean = rt.isProperty(sub_anim, "Export_Particles") - event_name = sub_anim.name - if boolean: + sub_anim = rt.GetSubAnim(obj, anim_name) + if rt.IsProperty(sub_anim, "Export_Particles"): + event_name = sub_anim.name opt = "${0}.{1}.export_particles".format(sel.name, event_name) attributes = get_setting()["attribute"] @@ -126,39 +118,36 @@ class ValidatePointCloud(pyblish.api.InstancePlugin): custom_attr = "{0}.PRTChannels_{1}".format(opt, value) try: - rt.execute(custom_attr) + rt.Execute(custom_attr) except RuntimeError: - invalid.add(key) + invalid.append(key) return invalid def validate_partition_value(self, instance): invalid = [] container = instance.data["instance_node"] - self.log.info("Validating tyFlow partition " - "value for {}".format(container)) + self.log.info( + f"Validating tyFlow partition value for {container}") - con = rt.getNodeByName(container) - selection_list = list(con.Children) + selection_list = instance.data["members"] for sel in selection_list: obj = sel.baseobject - anim_names = rt.getsubanimnames(obj) + anim_names = rt.GetSubAnimNames(obj) for anim_name in anim_names: # get all the names of the related tyFlow nodes - sub_anim = rt.getsubanim(obj, anim_name) - # check if there is export particle operator - boolean = rt.isProperty(sub_anim, "Export_Particles") - event_name = sub_anim.name - if boolean: + sub_anim = rt.GetSubAnim(obj, anim_name) + if rt.IsProperty(sub_anim, "Export_Particles"): + event_name = sub_anim.name opt = "${0}.{1}.export_particles".format(sel.name, event_name) - count = rt.execute(f'{opt}.PRTPartitionsCount') + count = rt.Execute(f'{opt}.PRTPartitionsCount') if count != 100: invalid.append(count) - start = rt.execute(f'{opt}.PRTPartitionsFrom') + start = rt.Execute(f'{opt}.PRTPartitionsFrom') if start != 1: invalid.append(start) - end = 
rt.execute(f'{opt}.PRTPartitionsTo') + end = rt.Execute(f'{opt}.PRTPartitionsTo') if end != 1: invalid.append(end) @@ -167,24 +156,23 @@ class ValidatePointCloud(pyblish.api.InstancePlugin): def validate_export_mode(self, instance): invalid = [] container = instance.data["instance_node"] - self.log.info("Validating tyFlow export " - "mode for {}".format(container)) + self.log.info( + f"Validating tyFlow export mode for {container}") - con = rt.getNodeByName(container) + con = rt.GetNodeByName(container) selection_list = list(con.Children) for sel in selection_list: obj = sel.baseobject - anim_names = rt.getsubanimnames(obj) + anim_names = rt.GetSubAnimNames(obj) for anim_name in anim_names: # get all the names of the related tyFlow nodes - sub_anim = rt.getsubanim(obj, anim_name) + sub_anim = rt.GetSubAnim(obj, anim_name) # check if there is export particle operator - boolean = rt.isProperty(sub_anim, "Export_Particles") + boolean = rt.IsProperty(sub_anim, "Export_Particles") event_name = sub_anim.name if boolean: - opt = "${0}.{1}.export_particles".format(sel.name, - event_name) - export_mode = rt.execute(f'{opt}.exportMode') + opt = f"${sel.name}.{event_name}.export_particles" + export_mode = rt.Execute(f'{opt}.exportMode') if export_mode != 1: invalid.append(export_mode) diff --git a/openpype/hosts/max/plugins/publish/validate_usd_plugin.py b/openpype/hosts/max/plugins/publish/validate_usd_plugin.py index 747147020a..9957e62736 100644 --- a/openpype/hosts/max/plugins/publish/validate_usd_plugin.py +++ b/openpype/hosts/max/plugins/publish/validate_usd_plugin.py @@ -1,36 +1,37 @@ # -*- coding: utf-8 -*- -import pyblish.api +"""Validator for USD plugin.""" from openpype.pipeline import PublishValidationError +from pyblish.api import InstancePlugin, ValidatorOrder from pymxs import runtime as rt -class ValidateUSDPlugin(pyblish.api.InstancePlugin): - """Validates if USD plugin is installed or loaded in Max - """ +def get_plugins() -> list: + """Get plugin list from 
3ds max.""" + manager = rt.PluginManager + count = manager.pluginDllCount + plugin_info_list = [] + for p in range(1, count + 1): + plugin_info = manager.pluginDllName(p) + plugin_info_list.append(plugin_info) - order = pyblish.api.ValidatorOrder - 0.01 + return plugin_info_list + + +class ValidateUSDPlugin(InstancePlugin): + """Validates if USD plugin is installed or loaded in 3ds max.""" + + order = ValidatorOrder - 0.01 families = ["model"] hosts = ["max"] label = "USD Plugin" def process(self, instance): - plugin_mgr = rt.pluginManager - plugin_count = plugin_mgr.pluginDllCount - plugin_info = self.get_plugins(plugin_mgr, - plugin_count) + """Plugin entry point.""" + + plugin_info = get_plugins() usd_import = "usdimport.dli" if usd_import not in plugin_info: - raise PublishValidationError("USD Plugin {}" - " not found".format(usd_import)) + raise PublishValidationError(f"USD Plugin {usd_import} not found") usd_export = "usdexport.dle" if usd_export not in plugin_info: - raise PublishValidationError("USD Plugin {}" - " not found".format(usd_export)) - - def get_plugins(self, manager, count): - plugin_info_list = list() - for p in range(1, count + 1): - plugin_info = manager.pluginDllName(p) - plugin_info_list.append(plugin_info) - - return plugin_info_list + raise PublishValidationError(f"USD Plugin {usd_export} not found") diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py index 7c3a732389..38a7adfd7d 100644 --- a/openpype/hosts/maya/plugins/load/load_arnold_standin.py +++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py @@ -35,9 +35,15 @@ class ArnoldStandinLoader(load.LoaderPlugin): color = "orange" def load(self, context, name, namespace, options): + if not cmds.pluginInfo("mtoa", query=True, loaded=True): + cmds.loadPlugin("mtoa") + # Create defaultArnoldRenderOptions before creating aiStandin + # which tries to connect it. 
Since we load the plugin and directly + # create aiStandin without the defaultArnoldRenderOptions, + # we need to create the render options for aiStandin creation. + from mtoa.core import createOptions + createOptions() - # Make sure to load arnold before importing `mtoa.ui.arnoldmenu` - cmds.loadPlugin("mtoa", quiet=True) import mtoa.ui.arnoldmenu version = context['version'] diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 777f4454dc..c05182ce97 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -2020,11 +2020,11 @@ class WorkfileSettings(object): # TODO: backward compatibility for old projects - remove later # perhaps old project overrides is having it set to older version # with use of `customOCIOConfigPath` + resolved_path = None if workfile_settings.get("customOCIOConfigPath"): unresolved_path = workfile_settings["customOCIOConfigPath"] ocio_paths = unresolved_path[platform.system().lower()] - resolved_path = None for ocio_p in ocio_paths: resolved_path = str(ocio_p).format(**os.environ) if not os.path.exists(resolved_path): @@ -2054,9 +2054,9 @@ class WorkfileSettings(object): self._root_node["colorManagement"].setValue("OCIO") # we dont need the key anymore - workfile_settings.pop("customOCIOConfigPath") - workfile_settings.pop("colorManagement") - workfile_settings.pop("OCIO_config") + workfile_settings.pop("customOCIOConfigPath", None) + workfile_settings.pop("colorManagement", None) + workfile_settings.pop("OCIO_config", None) # then set the rest for knob, value_ in workfile_settings.items(): diff --git a/openpype/hosts/nuke/startup/custom_write_node.py b/openpype/hosts/nuke/startup/custom_write_node.py index d9313231d8..ea53725834 100644 --- a/openpype/hosts/nuke/startup/custom_write_node.py +++ b/openpype/hosts/nuke/startup/custom_write_node.py @@ -1,9 +1,14 @@ +""" OpenPype custom script for setting up write nodes for non-publish """ import os import nuke -from 
openpype.hosts.nuke.api.lib import set_node_knobs_from_settings +import nukescripts +from openpype.pipeline import Anatomy +from openpype.hosts.nuke.api.lib import ( + set_node_knobs_from_settings, + get_nuke_imageio_settings +) -frame_padding = 5 temp_rendering_path_template = ( "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}") @@ -53,24 +58,94 @@ knobs_setting = { } -def main(): - write_selected_nodes = [ - s for s in nuke.selectedNodes() if s.Class() == "Write"] +class WriteNodeKnobSettingPanel(nukescripts.PythonPanel): + """ Write Node's Knobs Settings Panel """ + def __init__(self): + nukescripts.PythonPanel.__init__(self, "Set Knobs Value(Write Node)") - ext = None - knobs = knobs_setting["knobs"] - for knob in knobs: - if knob["name"] == "file_type": - ext = knob["value"] - for w in write_selected_nodes: - # data for mapping the path - data = { - "work": os.getenv("AVALON_WORKDIR"), - "subset": w["name"].value(), - "frame": "#" * frame_padding, - "ext": ext - } - file_path = temp_rendering_path_template.format(**data) - file_path = file_path.replace("\\", "/") - w["file"].setValue(file_path) - set_node_knobs_from_settings(w, knobs) + preset_name, _ = self.get_node_knobs_setting() + # create knobs + + self.selected_preset_name = nuke.Enumeration_Knob( + 'preset_selector', 'presets', preset_name) + # add knobs to panel + self.addKnob(self.selected_preset_name) + + def process(self): + """ Process the panel values. """ + write_selected_nodes = [ + selected_nodes for selected_nodes in nuke.selectedNodes() + if selected_nodes.Class() == "Write"] + + selected_preset = self.selected_preset_name.value() + ext = None + knobs = knobs_setting["knobs"] + preset_name, node_knobs_presets = ( + self.get_node_knobs_setting(selected_preset) + ) + + if selected_preset and preset_name: + if not node_knobs_presets: + nuke.message( + "No knobs value found in subset group.." 
+ "\nDefault setting will be used..") + else: + knobs = node_knobs_presets + + ext_knob_list = [knob for knob in knobs if knob["name"] == "file_type"] + if not ext_knob_list: + nuke.message( + "ERROR: No file type found in the subset's knobs." + "\nPlease add one to complete setting up the node") + return + else: + for knob in ext_knob_list: + ext = knob["value"] + + anatomy = Anatomy() + + frame_padding = int( + anatomy.templates["render"].get( + "frame_padding" + ) + ) + for write_node in write_selected_nodes: + # data for mapping the path + data = { + "work": os.getenv("AVALON_WORKDIR"), + "subset": write_node["name"].value(), + "frame": "#" * frame_padding, + "ext": ext + } + file_path = temp_rendering_path_template.format(**data) + file_path = file_path.replace("\\", "/") + write_node["file"].setValue(file_path) + set_node_knobs_from_settings(write_node, knobs) + + def get_node_knobs_setting(self, selected_preset=None): + preset_name = [] + knobs_nodes = [] + settings = [ + node_settings for node_settings + in get_nuke_imageio_settings()["nodes"]["overrideNodes"] + if node_settings["nukeNodeClass"] == "Write" + and node_settings["subsets"] + ] + if not settings: + return + + for i, _ in enumerate(settings): + if selected_preset in settings[i]["subsets"]: + knobs_nodes = settings[i]["knobs"] + + for setting in settings: + for subset in setting["subsets"]: + preset_name.append(subset) + + return preset_name, knobs_nodes + + +def main(): + p_ = WriteNodeKnobSettingPanel() + if p_.showModalDialog(): + print(p_.process()) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 96aaae23dc..8fa53f5f48 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -222,7 +222,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): "label": subset, "name": 
subset, "family": in_data["family"], - # "version": in_data.get("version", 1), "frameStart": in_data.get("representations", [None])[0].get( "frameStart", None ), @@ -232,6 +231,14 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): "families": instance_families } ) + # Fill version only if 'use_next_available_version' is disabled + # and version is filled in instance data + version = in_data.get("version") + use_next_available_version = in_data.get( + "use_next_available_version", True) + if not use_next_available_version and version is not None: + instance.data["version"] = version + self.log.info("collected instance: {}".format(pformat(instance.data))) self.log.info("parsing data: {}".format(pformat(in_data))) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py index 75930f0f31..36e041a32c 100644 --- a/openpype/hosts/traypublisher/api/plugin.py +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -1,4 +1,14 @@ -from openpype.lib.attribute_definitions import FileDef +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions, +) +from openpype.lib.attribute_definitions import ( + FileDef, + BoolDef, + NumberDef, + UISeparatorDef, +) from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS from openpype.pipeline.create import ( Creator, @@ -94,6 +104,7 @@ class TrayPublishCreator(Creator): class SettingsCreator(TrayPublishCreator): create_allow_context_change = True create_allow_thumbnail = True + allow_version_control = False extensions = [] @@ -101,8 +112,18 @@ class SettingsCreator(TrayPublishCreator): # Pass precreate data to creator attributes thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None) + # Fill 'version_to_use' if version control is enabled + if self.allow_version_control: + asset_name = data["asset"] + subset_docs_by_asset_id = self._prepare_next_versions( + [asset_name], [subset_name]) + version = 
subset_docs_by_asset_id[asset_name].get(subset_name) + pre_create_data["version_to_use"] = version + data["_previous_last_version"] = version + data["creator_attributes"] = pre_create_data data["settings_creator"] = True + # Create new instance new_instance = CreatedInstance(self.family, subset_name, data, self) @@ -111,7 +132,158 @@ class SettingsCreator(TrayPublishCreator): if thumbnail_path: self.set_instance_thumbnail_path(new_instance.id, thumbnail_path) + def _prepare_next_versions(self, asset_names, subset_names): + """Prepare next versions for given asset and subset names. + + Todos: + Expect combination of subset names by asset name to avoid + unnecessary server calls for unused subsets. + + Args: + asset_names (Iterable[str]): Asset names. + subset_names (Iterable[str]): Subset names. + + Returns: + dict[str, dict[str, int]]: Last versions by asset + and subset names. + """ + + # Prepare all versions for all combinations to '1' + subset_docs_by_asset_id = { + asset_name: { + subset_name: 1 + for subset_name in subset_names + } + for asset_name in asset_names + } + if not asset_names or not subset_names: + return subset_docs_by_asset_id + + asset_docs = get_assets( + self.project_name, + asset_names=asset_names, + fields=["_id", "name"] + ) + asset_names_by_id = { + asset_doc["_id"]: asset_doc["name"] + for asset_doc in asset_docs + } + subset_docs = list(get_subsets( + self.project_name, + asset_ids=asset_names_by_id.keys(), + subset_names=subset_names, + fields=["_id", "name", "parent"] + )) + + subset_ids = {subset_doc["_id"] for subset_doc in subset_docs} + last_versions = get_last_versions( + self.project_name, + subset_ids, + fields=["name", "parent"]) + + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + asset_name = asset_names_by_id[asset_id] + subset_name = subset_doc["name"] + subset_id = subset_doc["_id"] + last_version = last_versions.get(subset_id) + version = 0 + if last_version is not None: + version = last_version["name"] 
+ subset_docs_by_asset_id[asset_name][subset_name] += version + return subset_docs_by_asset_id + + def _fill_next_versions(self, instances_data): + """Fill next version for instances. + + Instances have also stored previous next version to be able to + recognize if user did enter different version. If version was + not changed by user, or user set it to '0' the next version will be + updated by current database state. + """ + + filtered_instance_data = [] + for instance in instances_data: + previous_last_version = instance.get("_previous_last_version") + creator_attributes = instance["creator_attributes"] + use_next_version = creator_attributes.get( + "use_next_version", True) + version = creator_attributes.get("version_to_use", 0) + if ( + use_next_version + or version == 0 + or version == previous_last_version + ): + filtered_instance_data.append(instance) + + asset_names = { + instance["asset"] + for instance in filtered_instance_data} + subset_names = { + instance["subset"] + for instance in filtered_instance_data} + subset_docs_by_asset_id = self._prepare_next_versions( + asset_names, subset_names + ) + for instance in filtered_instance_data: + asset_name = instance["asset"] + subset_name = instance["subset"] + version = subset_docs_by_asset_id[asset_name][subset_name] + instance["creator_attributes"]["version_to_use"] = version + instance["_previous_last_version"] = version + + def collect_instances(self): + """Collect instances from host. + + Overriden to be able to manage version control attributes. If version + control is disabled, the attributes will be removed from instances, + and next versions are filled if is version control enabled. 
+ """ + + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + instances = instances_by_identifier[self.identifier] + if not instances: + return + + if self.allow_version_control: + self._fill_next_versions(instances) + + for instance_data in instances: + # Make sure that there are not data related to version control + # if plugin does not support it + if not self.allow_version_control: + instance_data.pop("_previous_last_version", None) + creator_attributes = instance_data["creator_attributes"] + creator_attributes.pop("version_to_use", None) + creator_attributes.pop("use_next_version", None) + + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + def get_instance_attr_defs(self): + defs = self.get_pre_create_attr_defs() + if self.allow_version_control: + defs += [ + UISeparatorDef(), + BoolDef( + "use_next_version", + default=True, + label="Use next version", + ), + NumberDef( + "version_to_use", + default=1, + minimum=0, + maximum=999, + label="Version to use", + ) + ] + return defs + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes return [ FileDef( "representation_files", @@ -132,10 +304,6 @@ class SettingsCreator(TrayPublishCreator): ) ] - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() - @classmethod def from_settings(cls, item_data): identifier = item_data["identifier"] @@ -155,6 +323,8 @@ class SettingsCreator(TrayPublishCreator): "extensions": item_data["extensions"], "allow_sequences": item_data["allow_sequences"], "allow_multiple_items": item_data["allow_multiple_items"], - "default_variants": item_data["default_variants"] + "allow_version_control": item_data.get( + "allow_version_control", False), + "default_variants": item_data["default_variants"], } ) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py 
b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index c081216481..3fa3c3b8c8 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -47,6 +47,8 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): "Created temp staging directory for instance {}. {}" ).format(instance_label, tmp_folder)) + self._fill_version(instance, instance_label) + # Store filepaths for validation of their existence source_filepaths = [] # Make sure there are no representations with same name @@ -93,6 +95,28 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): ) ) + def _fill_version(self, instance, instance_label): + """Fill instance version under which will be instance integrated. + + Instance must have set 'use_next_version' to 'False' + and 'version_to_use' to version to use. + + Args: + instance (pyblish.api.Instance): Instance to fill version for. + instance_label (str): Label of instance to fill version for. 
+ """ + + creator_attributes = instance.data["creator_attributes"] + use_next_version = creator_attributes.get("use_next_version", True) + # If 'version_to_use' is '0' it means that next version should be used + version_to_use = creator_attributes.get("version_to_use", 0) + if use_next_version or not version_to_use: + return + instance.data["version"] = version_to_use + self.log.debug( + "Version for instance \"{}\" was set to \"{}\"".format( + instance_label, version_to_use)) + def _create_main_representations( self, instance, diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml new file mode 100644 index 0000000000..8a3b8f4d7d --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/help/validate_existing_version.xml @@ -0,0 +1,16 @@ + + + +Version already exists + +## Version already exists + +Version {version} you have set on instance '{subset_name}' under '{asset_name}' already exists. This validation is enabled by default to prevent accidental override of existing versions. + +### How to repair? +- Click on 'Repair' action -> this will change version to next available. +- Disable validation on the instance if you are sure you want to override the version. +- Reset publishing and manually change the version number. 
+ + + diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py b/openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py new file mode 100644 index 0000000000..1fb27acdeb --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_existing_version.py @@ -0,0 +1,57 @@ +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, + OptionalPyblishPluginMixin, + RepairAction, +) + + +class ValidateExistingVersion( + OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin +): + label = "Validate Existing Version" + order = ValidateContentsOrder + + hosts = ["traypublisher"] + + actions = [RepairAction] + + settings_category = "traypublisher" + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + version = instance.data.get("version") + if version is None: + return + + last_version = instance.data.get("latestVersion") + if last_version is None or last_version < version: + return + + subset_name = instance.data["subset"] + msg = "Version {} already exists for subset {}.".format( + version, subset_name) + + formatting_data = { + "subset_name": subset_name, + "asset_name": instance.data["asset"], + "version": version + } + raise PublishXmlValidationError( + self, msg, formatting_data=formatting_data) + + @classmethod + def repair(cls, instance): + create_context = instance.context.data["create_context"] + created_instance = create_context.get_instance_by_id( + instance.data["instance_id"]) + creator_attributes = created_instance["creator_attributes"] + # Disable version override + creator_attributes["use_next_version"] = True + create_context.save_changes() diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 55a96664d8..91a5b76e35 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -125,6 +125,7 @@ def pack_project( if not only_documents: 
roots = project_doc["config"]["roots"] # Determine root directory of project + source_root = None source_root_name = None for root_name, root_value in roots.items(): if source_root is not None: diff --git a/openpype/modules/README.md b/openpype/modules/README.md index 86afdb9d91..ce3f99b338 100644 --- a/openpype/modules/README.md +++ b/openpype/modules/README.md @@ -138,7 +138,8 @@ class ClockifyModule( "publish": [], "create": [], "load": [], - "actions": [] + "actions": [], + "inventory": [] } ``` diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 732525b6eb..fb9b4e1096 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -740,15 +740,16 @@ class ModulesManager: Unknown keys are logged out. Returns: - dict: Output is dictionary with keys "publish", "create", "load" - and "actions" each containing list of paths. + dict: Output is dictionary with keys "publish", "create", "load", + "actions" and "inventory" each containing list of paths. """ # Output structure output = { "publish": [], "create": [], "load": [], - "actions": [] + "actions": [], + "inventory": [] } unknown_keys_by_module = {} for module in self.get_enabled_modules(): @@ -853,6 +854,21 @@ class ModulesManager: host_name ) + def collect_inventory_action_paths(self, host_name): + """Helper to collect load plugin paths from modules. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of pyblish plugin paths. + """ + + return self._collect_plugin_paths( + "get_inventory_action_paths", + host_name + ) + def get_host_module(self, host_name): """Find host module by host name. diff --git a/openpype/modules/interfaces.py b/openpype/modules/interfaces.py index 8c9a6ee1dd..0d73bc35a3 100644 --- a/openpype/modules/interfaces.py +++ b/openpype/modules/interfaces.py @@ -33,8 +33,8 @@ class OpenPypeInterface: class IPluginPaths(OpenPypeInterface): """Module has plugin paths to return. 
- Expected result is dictionary with keys "publish", "create", "load" or - "actions" and values as list or string. + Expected result is dictionary with keys "publish", "create", "load", + "actions" or "inventory" and values as list or string. { "publish": ["path/to/publish_plugins"] } @@ -109,6 +109,21 @@ class IPluginPaths(OpenPypeInterface): return self._get_plugin_paths_by_type("publish") + def get_inventory_action_paths(self, host_name): + """Receive inventory action paths. + + Give addons ability to add inventory action plugin paths. + + Notes: + Default implementation uses 'get_plugin_paths' and always return + all publish plugin paths. + + Args: + host_name (str): For which host are the plugins meant. + """ + + return self._get_plugin_paths_by_type("inventory") + class ILaunchHookPaths(OpenPypeInterface): """Module has launch hook paths to return. @@ -395,13 +410,11 @@ class ITrayService(ITrayModule): class ISettingsChangeListener(OpenPypeInterface): - """Module has plugin paths to return. + """Module tries to listen to settings changes. + + Only settings changes in the current process are propagated. + Changes made in other processes or machines won't trigger the callbacks. - Expected result is dictionary with keys "publish", "create", "load" or - "actions" and values as list or string. - { - "publish": ["path/to/publish_plugins"] - } """ @abstractmethod diff --git a/openpype/pipeline/colorspace.py b/openpype/pipeline/colorspace.py index 899b14148b..1999ad3bed 100644 --- a/openpype/pipeline/colorspace.py +++ b/openpype/pipeline/colorspace.py @@ -312,7 +312,8 @@ def get_views_data_subprocess(config_path): def get_imageio_config( - project_name, host_name, + project_name, + host_name, project_settings=None, anatomy_data=None, anatomy=None @@ -325,12 +326,9 @@ def get_imageio_config( Args: project_name (str): project name host_name (str): host name - project_settings (dict, optional): project settings. - Defaults to None. 
- anatomy_data (dict, optional): anatomy formatting data. - Defaults to None. - anatomy (lib.Anatomy, optional): Anatomy object. - Defaults to None. + project_settings (Optional[dict]): Project settings. + anatomy_data (Optional[dict]): anatomy formatting data. + anatomy (Optional[Anatomy]): Anatomy object. Returns: dict: config path data or empty dict @@ -345,37 +343,36 @@ def get_imageio_config( formatting_data = deepcopy(anatomy_data) - # add project roots to anatomy data + # Add project roots to anatomy data formatting_data["root"] = anatomy.roots formatting_data["platform"] = platform.system().lower() - # get colorspace settings - # check if global settings group is having activate_global_color_management - # key at all. If it does't then default it to False - # this is for backward compatibility only - # TODO: in future rewrite this to be more explicit + # Get colorspace settings imageio_global, imageio_host = _get_imageio_settings( project_settings, host_name) - activate_color_management = ( - imageio_global.get("activate_global_color_management", False) - # for already saved overrides from previous version - # TODO: remove this in future - backward compatibility - or imageio_host.get("ocio_config").get("enabled") - ) + # Host 'ocio_config' is optional + host_ocio_config = imageio_host.get("ocio_config") or {} + + # Global color management must be enabled to be able to use host settings + activate_color_management = imageio_global.get( + "activate_global_color_management") + # TODO: remove this in future - backward compatibility + # For already saved overrides from previous version look for 'enabled' + # on host settings. + if activate_color_management is None: + activate_color_management = host_ocio_config.get("enabled", False) if not activate_color_management: # if global settings are disabled return empty dict because # it is expected that no colorspace management is needed - log.info( - "Colorspace management is disabled globally." 
- ) + log.info("Colorspace management is disabled globally.") return {} - # check if host settings group is having activate_host_color_management - # if it does not have activation key then default it to True so it uses - # global settings - # this is for backward compatibility + # Check if host settings group is having 'activate_host_color_management' + # - if it does not have activation key then default it to True so it uses + # global settings + # This is for backward compatibility. # TODO: in future rewrite this to be more explicit activate_host_color_management = imageio_host.get( "activate_host_color_management", True) @@ -389,21 +386,18 @@ def get_imageio_config( ) return {} - config_host = imageio_host.get("ocio_config", {}) - - # get config path from either global or host_name + # get config path from either global or host settings # depending on override flag # TODO: in future rewrite this to be more explicit - config_data = None - override_global_config = ( - config_host.get("override_global_config") + override_global_config = host_ocio_config.get("override_global_config") + if override_global_config is None: # for already saved overrides from previous version # TODO: remove this in future - backward compatibility - or config_host.get("enabled") - ) + override_global_config = host_ocio_config.get("enabled") + if override_global_config: config_data = _get_config_data( - config_host["filepath"], formatting_data + host_ocio_config["filepath"], formatting_data ) else: # get config path from global @@ -507,34 +501,35 @@ def get_imageio_file_rules(project_name, host_name, project_settings=None): frules_host = imageio_host.get("file_rules", {}) # compile file rules dictionary - activate_host_rules = ( - frules_host.get("activate_host_rules") + activate_host_rules = frules_host.get("activate_host_rules") + if activate_host_rules is None: # TODO: remove this in future - backward compatibility - or frules_host.get("enabled") - ) + activate_host_rules = 
frules_host.get("enabled", False) # return host rules if activated or global rules return frules_host["rules"] if activate_host_rules else global_rules def get_remapped_colorspace_to_native( - ocio_colorspace_name, host_name, imageio_host_settings): + ocio_colorspace_name, host_name, imageio_host_settings +): """Return native colorspace name. Args: ocio_colorspace_name (str | None): ocio colorspace name + host_name (str): Host name. + imageio_host_settings (dict[str, Any]): ImageIO host settings. Returns: - str: native colorspace name defined in remapping or None + Union[str, None]: native colorspace name defined in remapping or None """ - if not CashedData.remapping.get(host_name, {}).get("to_native"): + CashedData.remapping.setdefault(host_name, {}) + if CashedData.remapping[host_name].get("to_native") is None: remapping_rules = imageio_host_settings["remapping"]["rules"] - CashedData.remapping[host_name] = { - "to_native": { - rule["ocio_name"]: input["host_native_name"] - for rule in remapping_rules - } + CashedData.remapping[host_name]["to_native"] = { + rule["ocio_name"]: rule["host_native_name"] + for rule in remapping_rules } return CashedData.remapping[host_name]["to_native"].get( @@ -542,23 +537,25 @@ def get_remapped_colorspace_to_native( def get_remapped_colorspace_from_native( - host_native_colorspace_name, host_name, imageio_host_settings): + host_native_colorspace_name, host_name, imageio_host_settings +): """Return ocio colorspace name remapped from host native used name. Args: host_native_colorspace_name (str): host native colorspace name + host_name (str): Host name. + imageio_host_settings (dict[str, Any]): ImageIO host settings. Returns: - str: ocio colorspace name defined in remapping or None + Union[str, None]: Ocio colorspace name defined in remapping or None. 
""" - if not CashedData.remapping.get(host_name, {}).get("from_native"): + CashedData.remapping.setdefault(host_name, {}) + if CashedData.remapping[host_name].get("from_native") is None: remapping_rules = imageio_host_settings["remapping"]["rules"] - CashedData.remapping[host_name] = { - "from_native": { - input["host_native_name"]: rule["ocio_name"] - for rule in remapping_rules - } + CashedData.remapping[host_name]["from_native"] = { + rule["host_native_name"]: rule["ocio_name"] + for rule in remapping_rules } return CashedData.remapping[host_name]["from_native"].get( diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index ada78b989d..97a5c1ba69 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -181,6 +181,11 @@ def install_openpype_plugins(project_name=None, host_name=None): for path in load_plugin_paths: register_loader_plugin_path(path) + inventory_action_paths = modules_manager.collect_inventory_action_paths( + host_name) + for path in inventory_action_paths: + register_inventory_action_path(path) + if project_name is None: project_name = os.environ.get("AVALON_PROJECT") diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index 2fc0669732..332e271b0d 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -1441,6 +1441,19 @@ class CreateContext: """Access to global publish attributes.""" return self._publish_attributes + def get_instance_by_id(self, instance_id): + """Receive instance by id. + + Args: + instance_id (str): Instance id. + + Returns: + Union[CreatedInstance, None]: Instance or None if instance with + given id is not available. + """ + + return self._instances_by_id.get(instance_id) + def get_sorted_creators(self, identifiers=None): """Sorted creators by 'order' attribute. 
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index 4888476fff..8806a13ca0 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -16,7 +16,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.5 def process(self, context): - create_context = context.data.pop("create_context", None) + create_context = context.data.get("create_context") if not create_context: host = registered_host() if isinstance(host, IPublishHost): diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json index 3a42c93515..4c2c2f1391 100644 --- a/openpype/settings/defaults/project_settings/traypublisher.json +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -23,6 +23,7 @@ "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.", "allow_sequences": false, "allow_multiple_items": false, + "allow_version_control": false, "extensions": [ ".ma", ".mb", @@ -57,6 +58,7 @@ "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ", "allow_sequences": false, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".ma", ".mb", @@ -82,6 +84,7 @@ "detailed_description": "Alembic or bgeo cache of animated data", "allow_sequences": true, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".abc", ".bgeo", @@ -105,6 +108,7 @@ "detailed_description": "Any type of image seqeuence coming from outside of the studio. 
Usually camera footage, but could also be animatics used for reference.", "allow_sequences": true, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".exr", ".png", @@ -127,6 +131,7 @@ "detailed_description": "Sequence or single file renders", "allow_sequences": true, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".exr", ".png", @@ -150,6 +155,7 @@ "detailed_description": "Ideally this should be only camera itself with baked animation, however, it can technically also include helper geometry.", "allow_sequences": false, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".abc", ".ma", @@ -174,6 +180,7 @@ "detailed_description": "Any image data can be published as image family. References, textures, concept art, matte paints. This is a fallback 2d family for everything that doesn't fit more specific family.", "allow_sequences": false, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".exr", ".jpg", @@ -197,6 +204,7 @@ "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids", "allow_sequences": true, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [ ".vdb" ] @@ -215,6 +223,7 @@ "detailed_description": "Script exported from matchmoving application to be later processed into a tracked camera with additional data", "allow_sequences": false, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [] }, { @@ -227,6 +236,7 @@ "detailed_description": "CG rigged character or prop. 
Rig should be clean of any extra data and directly loadable into it's respective application\t", "allow_sequences": false, "allow_multiple_items": false, + "allow_version_control": false, "extensions": [ ".ma", ".blend", @@ -244,6 +254,7 @@ "detailed_description": "Texture files with Unreal Engine naming conventions", "allow_sequences": false, "allow_multiple_items": true, + "allow_version_control": false, "extensions": [] } ], @@ -322,6 +333,11 @@ "enabled": true, "optional": true, "active": true + }, + "ValidateExistingVersion": { + "enabled": true, + "optional": true, + "active": true } } } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json index 3703d82856..e75e2887db 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -85,6 +85,12 @@ "label": "Allow multiple items", "type": "boolean" }, + { + "type": "boolean", + "key": "allow_version_control", + "label": "Allow version control", + "default": false + }, { "type": "list", "key": "extensions", @@ -346,6 +352,10 @@ { "key": "ValidateFrameRange", "label": "Validate frame range" + }, + { + "key": "ValidateExistingVersion", + "label": "Validate Existing Version" } ] } diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py index 06ae06e4d2..3154f777df 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -1,4 +1,5 @@ import re +import platform from openpype.client import get_projects, create_project from .constants import ( @@ -8,13 +9,16 @@ from .constants import ( from openpype.client.operations import ( PROJECT_NAME_ALLOWED_SYMBOLS, PROJECT_NAME_REGEX, + OperationsSession, ) from 
openpype.style import load_stylesheet from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import ( PlaceholderLineEdit, - get_warning_pixmap + get_warning_pixmap, + PixmapLabel, ) +from openpype.settings.lib import get_default_anatomy_settings from qtpy import QtWidgets, QtCore, QtGui @@ -35,7 +39,7 @@ class NameTextEdit(QtWidgets.QLineEdit): sub_regex = "[^{}]+".format(NAME_ALLOWED_SYMBOLS) new_before_text = re.sub(sub_regex, "", before_text) new_after_text = re.sub(sub_regex, "", after_text) - idx -= (len(before_text) - len(new_before_text)) + idx -= len(before_text) - len(new_before_text) self.setText(new_before_text + new_after_text) self.setCursorPosition(idx) @@ -141,13 +145,40 @@ class CreateProjectDialog(QtWidgets.QDialog): inputs_widget = QtWidgets.QWidget(self) project_name_input = QtWidgets.QLineEdit(inputs_widget) project_code_input = QtWidgets.QLineEdit(inputs_widget) + project_width_input = NumScrollWidget(0, 9999999) + project_height_input = NumScrollWidget(0, 9999999) + project_fps_input = FloatScrollWidget(1, 9999999, decimals=3, step=1) + project_aspect_input = FloatScrollWidget( + 0, 9999999, decimals=2, step=0.1 + ) + project_frame_start_input = NumScrollWidget(-9999999, 9999999) + project_frame_end_input = NumScrollWidget(-9999999, 9999999) + + default_project_data = self.get_default_attributes() + project_width_input.setValue(default_project_data["resolutionWidth"]) + project_height_input.setValue(default_project_data["resolutionHeight"]) + project_fps_input.setValue(default_project_data["fps"]) + project_aspect_input.setValue(default_project_data["pixelAspect"]) + project_frame_start_input.setValue(default_project_data["frameStart"]) + project_frame_end_input.setValue(default_project_data["frameEnd"]) + library_project_input = QtWidgets.QCheckBox(inputs_widget) inputs_layout = QtWidgets.QFormLayout(inputs_widget) + if platform.system() == "Darwin": + inputs_layout.setFieldGrowthPolicy( + 
QtWidgets.QFormLayout.AllNonFixedFieldsGrow + ) inputs_layout.setContentsMargins(0, 0, 0, 0) inputs_layout.addRow("Project name:", project_name_input) inputs_layout.addRow("Project code:", project_code_input) inputs_layout.addRow("Library project:", library_project_input) + inputs_layout.addRow("Width:", project_width_input) + inputs_layout.addRow("Height:", project_height_input) + inputs_layout.addRow("FPS:", project_fps_input) + inputs_layout.addRow("Aspect:", project_aspect_input) + inputs_layout.addRow("Frame Start:", project_frame_start_input) + inputs_layout.addRow("Frame End:", project_frame_end_input) project_name_label = QtWidgets.QLabel(self) project_code_label = QtWidgets.QLabel(self) @@ -183,6 +214,12 @@ class CreateProjectDialog(QtWidgets.QDialog): self.project_name_input = project_name_input self.project_code_input = project_code_input self.library_project_input = library_project_input + self.project_width_input = project_width_input + self.project_height_input = project_height_input + self.project_fps_input = project_fps_input + self.project_aspect_input = project_aspect_input + self.project_frame_start_input = project_frame_start_input + self.project_frame_end_input = project_frame_end_input self.ok_btn = ok_btn @@ -190,6 +227,10 @@ class CreateProjectDialog(QtWidgets.QDialog): def project_name(self): return self.project_name_input.text() + def get_default_attributes(self): + settings = get_default_anatomy_settings() + return settings["attributes"] + def _on_project_name_change(self, value): if self._project_code_value is None: self._ignore_code_change = True @@ -215,12 +256,12 @@ class CreateProjectDialog(QtWidgets.QDialog): is_valid = False elif value in self.invalid_project_names: - message = "Project name \"{}\" already exist".format(value) + message = 'Project name "{}" already exist'.format(value) is_valid = False elif not PROJECT_NAME_REGEX.match(value): message = ( - "Project name \"{}\" contain not supported symbols" + 'Project name "{}" 
contain not supported symbols' ).format(value) is_valid = False @@ -237,12 +278,12 @@ class CreateProjectDialog(QtWidgets.QDialog): is_valid = False elif value in self.invalid_project_names: - message = "Project code \"{}\" already exist".format(value) + message = 'Project code "{}" already exist'.format(value) is_valid = False elif not PROJECT_NAME_REGEX.match(value): message = ( - "Project code \"{}\" contain not supported symbols" + 'Project code "{}" contain not supported symbols' ).format(value) is_valid = False @@ -264,9 +305,35 @@ class CreateProjectDialog(QtWidgets.QDialog): project_name = self.project_name_input.text() project_code = self.project_code_input.text() - library_project = self.library_project_input.isChecked() - create_project(project_name, project_code, library_project) + project_width = self.project_width_input.value() + project_height = self.project_height_input.value() + project_fps = self.project_fps_input.value() + project_aspect = self.project_aspect_input.value() + project_frame_start = self.project_frame_start_input.value() + project_frame_end = self.project_frame_end_input.value() + library_project = self.library_project_input.isChecked() + project_doc = create_project( + project_name, + project_code, + library_project, + ) + update_data = { + "data.resolutionWidth": project_width, + "data.resolutionHeight": project_height, + "data.fps": project_fps, + "data.pixelAspect": project_aspect, + "data.frameStart": project_frame_start, + "data.frameEnd": project_frame_end, + } + session = OperationsSession() + session.update_entity( + project_name, + project_doc["type"], + project_doc["_id"], + update_data, + ) + session.commit() self.done(1) def _get_existing_projects(self): @@ -288,45 +355,15 @@ class CreateProjectDialog(QtWidgets.QDialog): return project_names, project_codes -# TODO PixmapLabel should be moved to 'utils' in other future PR so should be -# imported from there -class PixmapLabel(QtWidgets.QLabel): - """Label resizing image 
to height of font.""" - def __init__(self, pixmap, parent): - super(PixmapLabel, self).__init__(parent) - self._empty_pixmap = QtGui.QPixmap(0, 0) - self._source_pixmap = pixmap - - def set_source_pixmap(self, pixmap): - """Change source image.""" - self._source_pixmap = pixmap - self._set_resized_pix() - +class ProjectManagerPixmapLabel(PixmapLabel): def _get_pix_size(self): size = self.fontMetrics().height() * 4 return size, size - def _set_resized_pix(self): - if self._source_pixmap is None: - self.setPixmap(self._empty_pixmap) - return - width, height = self._get_pix_size() - self.setPixmap( - self._source_pixmap.scaled( - width, - height, - QtCore.Qt.KeepAspectRatio, - QtCore.Qt.SmoothTransformation - ) - ) - - def resizeEvent(self, event): - self._set_resized_pix() - super(PixmapLabel, self).resizeEvent(event) - class ConfirmProjectDeletion(QtWidgets.QDialog): """Dialog which confirms deletion of a project.""" + def __init__(self, project_name, parent): super(ConfirmProjectDeletion, self).__init__(parent) @@ -335,23 +372,26 @@ class ConfirmProjectDeletion(QtWidgets.QDialog): top_widget = QtWidgets.QWidget(self) warning_pixmap = get_warning_pixmap() - warning_icon_label = PixmapLabel(warning_pixmap, top_widget) + warning_icon_label = ProjectManagerPixmapLabel( + warning_pixmap, top_widget + ) message_label = QtWidgets.QLabel(top_widget) message_label.setWordWrap(True) message_label.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) - message_label.setText(( - "WARNING: This cannot be undone.

" - "Project \"{}\" with all related data will be" - " permanently removed from the database. (This action won't remove" - " any files on disk.)" - ).format(project_name)) + message_label.setText( + ( + "WARNING: This cannot be undone.

" + 'Project "{}" with all related data will be' + " permanently removed from the database." + " (This action won't remove any files on disk.)" + ).format(project_name) + ) top_layout = QtWidgets.QHBoxLayout(top_widget) top_layout.setContentsMargins(0, 0, 0, 0) top_layout.addWidget( - warning_icon_label, 0, - QtCore.Qt.AlignTop | QtCore.Qt.AlignHCenter + warning_icon_label, 0, QtCore.Qt.AlignTop | QtCore.Qt.AlignHCenter ) top_layout.addWidget(message_label, 1) @@ -359,7 +399,7 @@ class ConfirmProjectDeletion(QtWidgets.QDialog): confirm_input = PlaceholderLineEdit(self) confirm_input.setPlaceholderText( - "Type \"{}\" to confirm...".format(project_name) + 'Type "{}" to confirm...'.format(project_name) ) cancel_btn = QtWidgets.QPushButton("Cancel", self) @@ -429,6 +469,7 @@ class ConfirmProjectDeletion(QtWidgets.QDialog): class SpinBoxScrollFixed(QtWidgets.QSpinBox): """QSpinBox which only allow edits change with scroll wheel when active""" + def __init__(self, *args, **kwargs): super(SpinBoxScrollFixed, self).__init__(*args, **kwargs) self.setFocusPolicy(QtCore.Qt.StrongFocus) @@ -442,6 +483,7 @@ class SpinBoxScrollFixed(QtWidgets.QSpinBox): class DoubleSpinBoxScrollFixed(QtWidgets.QDoubleSpinBox): """QDoubleSpinBox which only allow edits with scroll wheel when active""" + def __init__(self, *args, **kwargs): super(DoubleSpinBoxScrollFixed, self).__init__(*args, **kwargs) self.setFocusPolicy(QtCore.Qt.StrongFocus) @@ -451,3 +493,22 @@ class DoubleSpinBoxScrollFixed(QtWidgets.QDoubleSpinBox): event.ignore() else: super(DoubleSpinBoxScrollFixed, self).wheelEvent(event) + + +class NumScrollWidget(SpinBoxScrollFixed): + def __init__(self, minimum, maximum): + super(NumScrollWidget, self).__init__() + self.setMaximum(maximum) + self.setMinimum(minimum) + self.setButtonSymbols(QtWidgets.QSpinBox.NoButtons) + + +class FloatScrollWidget(DoubleSpinBoxScrollFixed): + def __init__(self, minimum, maximum, decimals, step=None): + super(FloatScrollWidget, self).__init__() + 
self.setMaximum(maximum) + self.setMinimum(minimum) + self.setDecimals(decimals) + if step is not None: + self.setSingleStep(step) + self.setButtonSymbols(QtWidgets.QSpinBox.NoButtons) diff --git a/openpype/tools/standalonepublish/widgets/widget_family.py b/openpype/tools/standalonepublish/widgets/widget_family.py index 11c5ec33b7..8c18a93a00 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family.py +++ b/openpype/tools/standalonepublish/widgets/widget_family.py @@ -128,7 +128,8 @@ class FamilyWidget(QtWidgets.QWidget): 'family_preset_key': key, 'family': family, 'subset': self.input_result.text(), - 'version': self.version_spinbox.value() + 'version': self.version_spinbox.value(), + 'use_next_available_version': self.version_checkbox.isChecked(), } return data diff --git a/openpype/version.py b/openpype/version.py index c44b1d29fb..3a218f3a06 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.15.11-nightly.2" +__version__ = "3.15.11-nightly.4"