diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index aa5b8decdc..e614d2fa65 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -35,6 +35,8 @@ body: label: Version description: What version are you running? Look to OpenPype Tray options: + - 3.15.10-nightly.2 + - 3.15.10-nightly.1 - 3.15.9 - 3.15.9-nightly.2 - 3.15.9-nightly.1 @@ -133,8 +135,6 @@ body: - 3.14.3-nightly.3 - 3.14.3-nightly.2 - 3.14.3-nightly.1 - - 3.14.2 - - 3.14.2-nightly.5 validations: required: true - type: dropdown diff --git a/openpype/client/entities.py b/openpype/client/entities.py index 8004dc3019..adbdd7a47c 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -855,12 +855,13 @@ def get_output_link_versions(project_name, version_id, fields=None): return conn.find(query_filter, _prepare_fields(fields)) -def get_last_versions(project_name, subset_ids, fields=None): +def get_last_versions(project_name, subset_ids, active=None, fields=None): """Latest versions for entered subset_ids. Args: project_name (str): Name of project where to look for queried entities. subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids. + active (Optional[bool]): If True only active versions are returned. fields (Optional[Iterable[str]]): Fields that should be returned. All fields are returned if 'None' is passed. @@ -899,12 +900,21 @@ def get_last_versions(project_name, subset_ids, fields=None): if name_needed: group_item["name"] = {"$last": "$name"} + aggregate_filter = { + "type": "version", + "parent": {"$in": subset_ids} + } + if active is False: + aggregate_filter["data.active"] = active + elif active is True: + aggregate_filter["$or"] = [ + {"data.active": {"$exists": 0}}, + {"data.active": active}, + ] + aggregation_pipeline = [ # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, + {"$match": aggregate_filter}, # Sorting versions all together {"$sort": {"name": 1}}, # Group them by "parent", but only take the last diff --git a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py new file mode 100644 index 0000000000..559e9ae0ce --- /dev/null +++ b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py @@ -0,0 +1,55 @@ +from pathlib import Path + +from openpype.lib import PreLaunchHook + + +class AddPythonScriptToLaunchArgs(PreLaunchHook): + """Add python script to be executed before Blender launch.""" + + # Append after file argument + order = 15 + app_groups = [ + "blender", + ] + + def execute(self): + if not self.launch_context.data.get("python_scripts"): + return + + # Add path to workfile to arguments + for python_script_path in self.launch_context.data["python_scripts"]: + self.log.info( + f"Adding python script {python_script_path} to launch" + ) + # Test script path exists + python_script_path = Path(python_script_path) + if not python_script_path.exists(): + self.log.warning( + f"Python script {python_script_path} doesn't exist. " + "Skipped..." 
+ ) + continue + + if "--" in self.launch_context.launch_args: + # Insert before separator + separator_index = self.launch_context.launch_args.index("--") + self.launch_context.launch_args.insert( + separator_index, + "-P", + ) + self.launch_context.launch_args.insert( + separator_index + 1, + python_script_path.as_posix(), + ) + else: + self.launch_context.launch_args.extend( + ["-P", python_script_path.as_posix()] + ) + + # Ensure separator + if "--" not in self.launch_context.launch_args: + self.launch_context.launch_args.append("--") + + self.launch_context.launch_args.extend( + [*self.launch_context.data.get("script_args", [])] + ) diff --git a/openpype/hosts/houdini/api/colorspace.py b/openpype/hosts/houdini/api/colorspace.py new file mode 100644 index 0000000000..7047644225 --- /dev/null +++ b/openpype/hosts/houdini/api/colorspace.py @@ -0,0 +1,56 @@ +import attr +import hou +from openpype.hosts.houdini.api.lib import get_color_management_preferences + + +@attr.s +class LayerMetadata(object): + """Data class for Render Layer metadata.""" + frameStart = attr.ib() + frameEnd = attr.ib() + + +@attr.s +class RenderProduct(object): + """Getting Colorspace as + Specific Render Product Parameter for submitting + publish job. + + """ + colorspace = attr.ib() # colorspace + view = attr.ib() + productName = attr.ib(default=None) + + +class ARenderProduct(object): + + def __init__(self): + """Constructor.""" + # Initialize + self.layer_data = self._get_layer_data() + self.layer_data.products = self.get_colorspace_data() + + def _get_layer_data(self): + return LayerMetadata( + frameStart=int(hou.playbar.frameRange()[0]), + frameEnd=int(hou.playbar.frameRange()[1]), + ) + + def get_colorspace_data(self): + """To be implemented by renderer class. + + This should return a list of RenderProducts. + + Returns: + list: List of RenderProduct + + """ + data = get_color_management_preferences() + colorspace_data = [ + RenderProduct( + colorspace=data["display"], + view=data["view"], + productName="" + ) + ] + return colorspace_data diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index 2e58f3dd98..a33ba7aad2 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import sys import os +import re import uuid import logging from contextlib import contextmanager @@ -581,3 +582,74 @@ def splitext(name, allowed_multidot_extensions): return name[:-len(ext)], ext return os.path.splitext(name) + + +def get_top_referenced_parm(parm): + + processed = set() # disallow infinite loop + while True: + if parm.path() in processed: + raise RuntimeError("Parameter references result in cycle.") + + processed.add(parm.path()) + + ref = parm.getReferencedParm() + if ref.path() == parm.path(): + # It returns itself when it doesn't reference + # another parameter + return ref + else: + parm = ref + + +def evalParmNoFrame(node, parm, pad_character="#"): + + parameter = node.parm(parm) + assert parameter, "Parameter does not exist: %s.%s" % (node, parm) + + # If the parameter has a parameter reference, then get that + # parameter instead as otherwise `unexpandedString()` fails. 
+ parameter = get_top_referenced_parm(parameter) + + # Substitute out the frame numbering with padded characters + try: + raw = parameter.unexpandedString() + except hou.Error as exc: + print("Failed: %s" % parameter) + raise RuntimeError(exc) + + def replace(match): + padding = 1 + n = match.group(2) + if n and int(n): + padding = int(n) + return pad_character * padding + + expression = re.sub(r"(\$F([0-9]*))", replace, raw) + + with hou.ScriptEvalContext(parameter): + return hou.expandStringAtFrame(expression, 0) + + +def get_color_management_preferences(): + """Get default OCIO preferences""" + data = { + "config": hou.Color.ocio_configPath() + + } + + # Get default display and view from OCIO + display = hou.Color.ocio_defaultDisplay() + disp_regex = re.compile(r"^(?P.+-)(?P.+)$") + disp_match = disp_regex.match(display) + + view = hou.Color.ocio_defaultView() + view_regex = re.compile(r"^(?P.+- )(?P.+)$") + view_match = view_regex.match(view) + data.update({ + "display": disp_match.group("display"), + "view": view_match.group("view") + + }) + + return data diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py new file mode 100644 index 0000000000..bddf26dbd5 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py @@ -0,0 +1,71 @@ +from openpype.hosts.houdini.api import plugin +from openpype.lib import EnumDef + + +class CreateArnoldRop(plugin.HoudiniCreator): + """Arnold ROP""" + + identifier = "io.openpype.creators.houdini.arnold_rop" + label = "Arnold ROP" + family = "arnold_rop" + icon = "magic" + defaults = ["master"] + + # Default extension + ext = "exr" + + def create(self, subset_name, instance_data, pre_create_data): + import hou + + # Remove the active, we are checking the bypass flag of the nodes + instance_data.pop("active", None) + instance_data.update({"node_type": "arnold"}) + + # Add chunk size attribute + instance_data["chunkSize"] = 1 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateArnoldRop, self).create( + subset_name, + instance_data, + pre_create_data) # type: plugin.CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + ext = pre_create_data.get("image_format") + + filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + ext=ext, + ) + parms = { + # Render frame range + "trange": 1, + + # Arnold ROP settings + "ar_picture": filepath, + "ar_exr_half_precision": 1 # half precision + } + + instance_node.setParms(parms) + + # Lock any parameters in this list + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def get_pre_create_attr_defs(self): + attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs() + + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default=self.ext, + label="Image Format Options") + ] diff --git a/openpype/hosts/houdini/plugins/create/create_karma_rop.py b/openpype/hosts/houdini/plugins/create/create_karma_rop.py new file mode 100644 index 0000000000..edfb992e1a --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_karma_rop.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +"""Creator plugin to create Karma ROP.""" +from openpype.hosts.houdini.api import plugin +from openpype.pipeline import 
CreatedInstance +from openpype.lib import BoolDef, EnumDef, NumberDef + + +class CreateKarmaROP(plugin.HoudiniCreator): + """Karma ROP""" + identifier = "io.openpype.creators.houdini.karma_rop" + label = "Karma ROP" + family = "karma_rop" + icon = "magic" + defaults = ["master"] + + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa + + instance_data.pop("active", None) + instance_data.update({"node_type": "karma"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateKarmaROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + ext = pre_create_data.get("image_format") + + filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + ext=ext, + ) + checkpoint = "{cp_dir}{subset_name}.$F4.checkpoint".format( + cp_dir=hou.text.expandString("$HIP/pyblish/"), + subset_name=subset_name + ) + + usd_directory = "{usd_dir}{subset_name}_$RENDERID".format( + usd_dir=hou.text.expandString("$HIP/pyblish/renders/usd_renders/"), # noqa + subset_name=subset_name + ) + + parms = { + # Render Frame Range + "trange": 1, + # Karma ROP Setting + "picture": filepath, + # Karma Checkpoint Setting + "productName": checkpoint, + # USD Output Directory + "savetodirectory": usd_directory, + } + + res_x = pre_create_data.get("res_x") + res_y = pre_create_data.get("res_y") + + if self.selected_nodes: + # If camera found in selection + # we will use as render camera + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + has_camera = pre_create_data.get("cam_res") + if has_camera: + res_x = node.evalParm("resx") + res_y = node.evalParm("resy") + + if not camera: + self.log.warning("No render camera found in selection") + + parms.update({ + "camera": camera or "", + "resolutionx": res_x, + "resolutiony": res_y, + }) + + instance_node.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def get_pre_create_attr_defs(self): + attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs() + + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default="exr", + label="Image Format Options"), + NumberDef("res_x", + label="width", + default=1920, + decimals=0), + NumberDef("res_y", + label="height", + default=720, + decimals=0), + BoolDef("cam_res", + label="Camera Resolution", + default=False) + ] diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py new file mode 100644 index 0000000000..5ca53e96de --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +"""Creator plugin to create Mantra ROP.""" +from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef, BoolDef + + +class CreateMantraROP(plugin.HoudiniCreator): + """Mantra ROP""" + identifier = "io.openpype.creators.houdini.mantra_rop" + label = "Mantra ROP" + family = "mantra_rop" + icon = "magic" + defaults = ["master"] + + def create(self, subset_name, instance_data, pre_create_data): + 
import hou # noqa + + instance_data.pop("active", None) + instance_data.update({"node_type": "ifd"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateMantraROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + ext = pre_create_data.get("image_format") + + filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + ext=ext, + ) + + parms = { + # Render Frame Range + "trange": 1, + # Mantra ROP Setting + "vm_picture": filepath, + } + + if self.selected_nodes: + # If camera found in selection + # we will use as render camera + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + camera = node.path() + + if not camera: + self.log.warning("No render camera found in selection") + + parms.update({"camera": camera or ""}) + + custom_res = pre_create_data.get("override_resolution") + if custom_res: + parms.update({"override_camerares": 1}) + instance_node.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def get_pre_create_attr_defs(self): + attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default="exr", + label="Image Format Options"), + BoolDef("override_resolution", + label="Override Camera Resolution", + tooltip="Override the current camera " + "resolution, recommended for IPR.", + default=False) + ] diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py index 2cbe9bfda1..e14ff15bf8 100644 --- a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py @@ -1,7 +1,10 @@ # -*- coding: utf-8 -*- """Creator plugin to create Redshift ROP.""" +import hou # noqa + from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef class CreateRedshiftROP(plugin.HoudiniCreator): @@ -11,20 +14,16 @@ class CreateRedshiftROP(plugin.HoudiniCreator): family = "redshift_rop" icon = "magic" defaults = ["master"] + ext = "exr" def create(self, subset_name, instance_data, pre_create_data): - import hou # noqa instance_data.pop("active", None) instance_data.update({"node_type": "Redshift_ROP"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - - # Clear the family prefix from the subset - subset = subset_name - subset_no_prefix = subset[len(self.family):] - subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:] - subset_name = subset_no_prefix + # Submit for job publishing + instance_data["farm"] = True instance = super(CreateRedshiftROP, self).create( subset_name, @@ -34,11 +33,10 @@ class CreateRedshiftROP(plugin.HoudiniCreator): instance_node = hou.node(instance.get("instance_node")) basename = instance_node.name() - instance_node.setName(basename + "_ROP", unique_name=True) # Also create the linked Redshift IPR Rop try: - ipr_rop = self.parent.createNode( + ipr_rop = instance_node.parent().createNode( "Redshift_IPR", node_name=basename + "_IPR" ) except 
hou.OperationFailed: @@ -50,19 +48,58 @@ class CreateRedshiftROP(plugin.HoudiniCreator): ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1)) # Set the linked rop to the Redshift ROP - ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance)) + ipr_rop.parm("linked_rop").set(instance_node.path()) + + ext = pre_create_data.get("image_format") + filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext) + ) - prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr' parms = { # Render frame range "trange": 1, # Redshift ROP settings - "RS_outputFileNamePrefix": prefix, - "RS_outputMultilayerMode": 0, # no multi-layered exr + "RS_outputFileNamePrefix": filepath, + "RS_outputMultilayerMode": "1", # no multi-layered exr "RS_outputBeautyAOVSuffix": "beauty", } + + if self.selected_nodes: + # set up the render camera from the selected node + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + camera = node.path() + parms.update({ + "RS_renderCamera": camera or ""}) instance_node.setParms(parms) # Lock some Avalon attributes to_lock = ["family", "id"] self.lock_parameters(instance_node, to_lock) + + def remove_instances(self, instances): + for instance in instances: + node = instance.data.get("instance_node") + + ipr_node = hou.node(f"{node}_IPR") + if ipr_node: + ipr_node.destroy() + + return super(CreateRedshiftROP, self).remove_instances(instances) + + def get_pre_create_attr_defs(self): + attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default=self.ext, + label="Image Format Options") + ] diff --git a/openpype/hosts/houdini/plugins/create/create_vray_rop.py b/openpype/hosts/houdini/plugins/create/create_vray_rop.py new file mode 100644 index 0000000000..1de9be4ed6 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_vray_rop.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +"""Creator plugin to create VRay ROP.""" +import hou + +from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef, BoolDef + + +class CreateVrayROP(plugin.HoudiniCreator): + """VRay ROP""" + + identifier = "io.openpype.creators.houdini.vray_rop" + label = "VRay ROP" + family = "vray_rop" + icon = "magic" + defaults = ["master"] + + ext = "exr" + + def create(self, subset_name, instance_data, pre_create_data): + + instance_data.pop("active", None) + instance_data.update({"node_type": "vray_renderer"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateVrayROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + # Add IPR for Vray + basename = instance_node.name() + try: + ipr_rop = instance_node.parent().createNode( + "vray", node_name=basename + "_IPR" + ) + except hou.OperationFailed: + raise plugin.OpenPypeCreatorError( + "Cannot create Vray render node. " + "Make sure Vray installed and enabled!" 
+ ) + + ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1)) + ipr_rop.parm("rop").set(instance_node.path()) + + parms = { + "trange": 1, + "SettingsEXR_bits_per_channel": "16" # half precision + } + + if self.selected_nodes: + # set up the render camera from the selected node + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + camera = node.path() + parms.update({ + "render_camera": camera or "" + }) + + # Enable render element + ext = pre_create_data.get("image_format") + instance_data["RenderElement"] = pre_create_data.get("render_element_enabled") # noqa + if pre_create_data.get("render_element_enabled", True): + # Vray has its own tag for AOV file output + filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + fmt="${aov}.$F4.{ext}".format(aov="AOV", + ext=ext) + ) + filepath = "{}{}".format( + hou.text.expandString("$HIP/pyblish/renders/"), + "{}/{}.${}.$F4.{}".format(subset_name, + subset_name, + "AOV", + ext) + ) + re_rop = instance_node.parent().createNode( + "vray_render_channels", + node_name=basename + "_render_element" + ) + # move the render element node next to the vray renderer node + re_rop.setPosition(instance_node.position() + hou.Vector2(0, 1)) + re_path = re_rop.path() + parms.update({ + "use_render_channels": 1, + "SettingsOutput_img_file_path": filepath, + "render_network_render_channels": re_path + }) + + else: + filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + fmt="$F4.{ext}".format(ext=ext) + ) + parms.update({ + "use_render_channels": 0, + "SettingsOutput_img_file_path": filepath + }) + + custom_res = pre_create_data.get("override_resolution") + if custom_res: + parms.update({"override_camerares": 1}) + + instance_node.setParms(parms) + + # lock parameters from AVALON + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def remove_instances(self, instances): + for instance in instances: + node = instance.data.get("instance_node") + # for the extra render node from the plugins + # such as vray and redshift + ipr_node = hou.node("{}{}".format(node, "_IPR")) + if ipr_node: + ipr_node.destroy() + re_node = hou.node("{}{}".format(node, + "_render_element")) + if re_node: + re_node.destroy() + + return super(CreateVrayROP, self).remove_instances(instances) + + def get_pre_create_attr_defs(self): + attrs = super(CreateVrayROP, self).get_pre_create_attr_defs() + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default=self.ext, + label="Image Format Options"), + BoolDef("override_resolution", + label="Override Camera Resolution", + tooltip="Override the current camera " + "resolution, recommended for IPR.", + default=False), + BoolDef("render_element_enabled", + label="Render Element", + tooltip="Create Render Element Node " + "if enabled", + default=False) + ] diff --git a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py new file mode 100644 index 0000000000..614785487f --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py @@ -0,0 +1,135 @@ +import os +import re + +import hou +import pyblish.api + +from openpype.hosts.houdini.api import 
colorspace +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, get_color_management_preferences) + + +class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Arnold ROP Render Products + + Collects the instance.data["files"] for the render products. + + Provides: + instance -> files + + """ + + label = "Arnold ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["arnold_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "ar_picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name(prefix=default_prefix, + suffix=None) + render_products.append(beauty_product) + + files_by_aov = { + "": self.generate_expected_files(instance, beauty_product) + } + + num_aovs = rop.evalParm("ar_aovs") + for index in range(1, num_aovs + 1): + # Skip disabled AOVs + if not rop.evalParm("ar_enable_aovP{}".format(index)): + continue + + if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)): + label = rop.evalParm("ar_aov_exr_layer_name{}".format(index)) + else: + label = evalParmNoFrame(rop, "ar_aov_label{}".format(index)) + + aov_product = self.get_render_product_name(default_prefix, + suffix=label) + render_products.append(aov_product) + files_by_aov[label] = self.generate_expected_files(instance, + aov_product) + + for product in render_products: + self.log.debug("Found render product: {}".format(product)) + + instance.data["files"] = list(render_products) + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + """Return the output filename using the AOV prefix and suffix""" + + # When AOV is explicitly defined in prefix we just swap it out + # directly with the AOV suffix to embed it. + # Note: ${AOV} seems to be evaluated in the parameter as %AOV% + if "%AOV%" in prefix: + # It seems that when some special separator characters are present + # before the %AOV% token that Redshift will secretly remove it if + # there is no suffix for the current product, for example: + # foo_%AOV% -> foo.exr + pattern = "%AOV%" if suffix else "[._-]?%AOV%" + product_name = re.sub(pattern, + suffix, + prefix, + flags=re.IGNORECASE) + else: + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = prefix_base + "." 
+ suffix + ext + else: + product_name = prefix + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py index 059793e3c5..91a3d9d170 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_frames.py +++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py @@ -11,15 +11,13 @@ from openpype.hosts.houdini.api import lib class CollectFrames(pyblish.api.InstancePlugin): """Collect all frames which would be saved from the ROP nodes""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.01 label = "Collect Frames" families = ["vdbcache", "imagesequence", "ass", "redshiftproxy", "review"] def process(self, instance): ropnode = hou.node(instance.data["instance_node"]) - frame_data = lib.get_frame_data(ropnode) - instance.data.update(frame_data) start_frame = instance.data.get("frameStart", None) end_frame = instance.data.get("frameEnd", None) diff --git a/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py new file mode 100644 index 0000000000..584343cd64 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py @@ -0,0 +1,56 @@ +import hou + +import pyblish.api + + +class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin): + """Collect time range frame data for the instance node.""" + + order = pyblish.api.CollectorOrder + 0.001 + label = "Instance Node Frame Range" + hosts = ["houdini"] + + def process(self, instance): + + node_path = instance.data.get("instance_node") + node = hou.node(node_path) if node_path else None + if not node_path or not node: + self.log.debug("No instance node found for instance: " + "{}".format(instance)) + return + + frame_data = self.get_frame_data(node) + if not frame_data: + return + + self.log.info("Collected time data: {}".format(frame_data)) + instance.data.update(frame_data) + + def get_frame_data(self, node): + """Get the frame data: start frame, end frame and steps + Args: + node(hou.Node) + + Returns: + dict + + """ + + data = {} + + if node.parm("trange") is None: + self.log.debug("Node has no 'trange' parameter: " + "{}".format(node.path())) + return data + + if node.evalParm("trange") == 0: + # Ignore 'render current frame' + self.log.debug("Node '{}' has 'Render current frame' set. 
" + "Time range data ignored.".format(node.path())) + return data + + data["frameStart"] = node.evalParm("f1") + data["frameEnd"] = node.evalParm("f2") + data["byFrameStep"] = node.evalParm("f3") + + return data diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py index 5d5347f96e..3772c9e705 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py @@ -70,16 +70,10 @@ class CollectInstances(pyblish.api.ContextPlugin): if "active" in data: data["publish"] = data["active"] - data.update(self.get_frame_data(node)) - # Create nice name if the instance has a frame range. label = data.get("name", node.name()) label += " (%s)" % data["asset"] # include asset in name - if "frameStart" in data and "frameEnd" in data: - frames = "[{frameStart} - {frameEnd}]".format(**data) - label = "{} {}".format(label, frames) - instance = context.create_instance(label) # Include `families` using `family` data @@ -118,6 +112,6 @@ class CollectInstances(pyblish.api.ContextPlugin): data["frameStart"] = node.evalParm("f1") data["frameEnd"] = node.evalParm("f2") - data["steps"] = node.evalParm("f3") + data["byFrameStep"] = node.evalParm("f3") return data diff --git a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py new file mode 100644 index 0000000000..eabb1128d8 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py @@ -0,0 +1,104 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Karma Render Products + + Collects the instance.data["files"] for the multipart render product. 
+ + Provides: + instance -> files + + """ + + label = "Karma ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["karma_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name( + prefix=default_prefix, suffix=None + ) + render_products.append(beauty_product) + + files_by_aov = { + "beauty": self.generate_expected_files(instance, + beauty_product) + } + + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + for product in render_products: + self.log.debug("Found render product: %s" % product) + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + product_name = prefix + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = "{}.{}{}".format(prefix_base, suffix, ext) + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py new file mode 100644 index 0000000000..c4460f5350 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py @@ -0,0 +1,127 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Mantra Render Products + + Collects the instance.data["files"] for the render products. 
+ + Provides: + instance -> files + + """ + + label = "Mantra ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["mantra_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "vm_picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name( + prefix=default_prefix, suffix=None + ) + render_products.append(beauty_product) + + files_by_aov = { + "beauty": self.generate_expected_files(instance, + beauty_product) + } + + aov_numbers = rop.evalParm("vm_numaux") + if aov_numbers > 0: + # get the filenames of the AOVs + for i in range(1, aov_numbers + 1): + var = rop.evalParm("vm_variable_plane%d" % i) + if var: + aov_name = "vm_filename_plane%d" % i + aov_boolean = "vm_usefile_plane%d" % i + aov_enabled = rop.evalParm(aov_boolean) + has_aov_path = rop.evalParm(aov_name) + if has_aov_path and aov_enabled == 1: + aov_prefix = evalParmNoFrame(rop, aov_name) + aov_product = self.get_render_product_name( + prefix=aov_prefix, suffix=None + ) + render_products.append(aov_product) + + files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa + + for product in render_products: + self.log.debug("Found render product: %s" % product) + + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + product_name = prefix + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = prefix_base + "." 
+ suffix + ext + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py index f1d73d7523..dbb15ab88f 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -4,52 +4,13 @@ import os import hou import pyblish.api - -def get_top_referenced_parm(parm): - - processed = set() # disallow infinite loop - while True: - if parm.path() in processed: - raise RuntimeError("Parameter references result in cycle.") - - processed.add(parm.path()) - - ref = parm.getReferencedParm() - if ref.path() == parm.path(): - # It returns itself when it doesn't reference - # another parameter - return ref - else: - parm = ref - - -def evalParmNoFrame(node, parm, pad_character="#"): - - parameter = node.parm(parm) - assert parameter, "Parameter does not exist: %s.%s" % (node, parm) - - # If the parameter has a parameter reference, then get that - # parameter instead as otherwise `unexpandedString()` fails. - parameter = get_top_referenced_parm(parameter) - - # Substitute out the frame numbering with padded characters - try: - raw = parameter.unexpandedString() - except hou.Error as exc: - print("Failed: %s" % parameter) - raise RuntimeError(exc) - - def replace(match): - padding = 1 - n = match.group(2) - if n and int(n): - padding = int(n) - return pad_character * padding - - expression = re.sub(r"(\$F([0-9]*))", replace, raw) - - with hou.ScriptEvalContext(parameter): - return hou.expandStringAtFrame(expression, 0) +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): @@ -87,6 +48,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): prefix=default_prefix, suffix=beauty_suffix ) render_products.append(beauty_product) + files_by_aov = { + "_": self.generate_expected_files(instance, + beauty_product)} num_aovs = rop.evalParm("RS_aov") for index in range(num_aovs): @@ -104,11 +68,29 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): aov_product = self.get_render_product_name(aov_prefix, aov_suffix) render_products.append(aov_product) + files_by_aov[aov_suffix] = self.generate_expected_files(instance, + aov_product) # noqa + for product in render_products: self.log.debug("Found render product: %s" % product) filenames = list(render_products) instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # 
update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] def get_render_product_name(self, prefix, suffix): """Return the output filename using the AOV prefix and suffix""" @@ -133,3 +115,27 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): product_name = prefix return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py new file mode 100644 index 0000000000..2a6be6b9f1 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +"""Collector plugin for frames data on ROP instances.""" +import hou # noqa +import pyblish.api +from openpype.hosts.houdini.api import lib + + +class CollectRopFrameRange(pyblish.api.InstancePlugin): + """Collect all frames which would be saved from the ROP nodes""" + + order = pyblish.api.CollectorOrder + label = "Collect RopNode Frame Range" + + def process(self, instance): + + node_path = instance.data.get("instance_node") + if node_path is None: + # Instance without instance node like a workfile instance + return + + ropnode = hou.node(node_path) + frame_data = lib.get_frame_data(ropnode) + + if "frameStart" in frame_data and "frameEnd" in frame_data: + + # Log artist friendly message about the collected frame range + message = ( + "Frame range {0[frameStart]} - {0[frameEnd]}" + ).format(frame_data) + if frame_data.get("step", 1.0) != 1.0: + message += " with step {0[step]}".format(frame_data) + self.log.info(message) + + instance.data.update(frame_data) + + # Add frame range to label if the instance has a frame range. + label = instance.data.get("label", instance.data["name"]) + instance.data["label"] = ( + "{0} [{1[frameStart]} - {1[frameEnd]}]".format(label, + frame_data) + ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py new file mode 100644 index 0000000000..d4fe37f993 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py @@ -0,0 +1,129 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Vray Render Products + + Collects the instance.data["files"] for the render products. 
+ + Provides: + instance -> files + + """ + + label = "VRay ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["vray_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path") + render_products = [] + # TODO: add render elements if render element + + beauty_product = self.get_beauty_render_product(default_prefix) + render_products.append(beauty_product) + files_by_aov = { + "RGB Color": self.generate_expected_files(instance, + beauty_product)} + + if instance.data.get("RenderElement", True): + render_element = self.get_render_element_name(rop, default_prefix) + if render_element: + for aov, renderpass in render_element.items(): + render_products.append(renderpass) + files_by_aov[aov] = self.generate_expected_files(instance, renderpass) # noqa + + for product in render_products: + self.log.debug("Found render product: %s" % product) + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + self.log.debug("expectedFiles:{}".format(files_by_aov)) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_beauty_render_product(self, prefix, suffix=""): + """Return the beauty output filename if render element enabled + """ + aov_parm = ".{}".format(suffix) + beauty_product = None + if aov_parm in prefix: + beauty_product = prefix.replace(aov_parm, "") + else: + beauty_product = prefix + + return beauty_product + + def get_render_element_name(self, node, prefix, suffix=""): + """Return the output filename using the AOV prefix and suffix + """ + render_element_dict = {} + # need a rewrite + re_path = node.evalParm("render_network_render_channels") + if re_path: + node_children = hou.node(re_path).children() + for element in node_children: + if element.shaderName() != "vray:SettingsRenderChannels": + aov = str(element) + render_product = prefix.replace(suffix, aov) + render_element_dict[aov] = render_product + return render_element_dict + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py 
b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index 16d9ef9aec..2493b28bc1 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -2,7 +2,10 @@ import pyblish.api from openpype.lib import version_up from openpype.pipeline import registered_host +from openpype.action import get_errored_plugins_from_data from openpype.hosts.houdini.api import HoudiniHost +from openpype.pipeline.publish import KnownPublishError + class IncrementCurrentFile(pyblish.api.ContextPlugin): """Increment the current file. @@ -14,17 +17,32 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin): label = "Increment current file" order = pyblish.api.IntegratorOrder + 9.0 hosts = ["houdini"] - families = ["workfile"] + families = ["workfile", + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "usdrender"] optional = True def process(self, context): + errored_plugins = get_errored_plugins_from_data(context) + if any( + plugin.__name__ == "HoudiniSubmitPublishDeadline" + for plugin in errored_plugins + ): + raise KnownPublishError( + "Skipping incrementing current file because " + "submission to deadline failed." + ) + # Filename must not have changed since collecting host = registered_host() # type: HoudiniHost current_file = host.current_file() assert ( context.data["currentFile"] == current_file - ), "Collected filename from current scene name." + ), "Collected filename mismatches from current scene name." new_filepath = version_up(current_file) host.save_workfile(new_filepath) diff --git a/openpype/hosts/max/api/pipeline.py b/openpype/hosts/max/api/pipeline.py index 50fe30b299..03b85a4066 100644 --- a/openpype/hosts/max/api/pipeline.py +++ b/openpype/hosts/max/api/pipeline.py @@ -6,7 +6,7 @@ from operator import attrgetter import json -from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost import pyblish.api from openpype.pipeline import ( register_creator_plugin_path, @@ -28,7 +28,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher): +class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): name = "max" menu = None diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py index 95ee014e07..5f1ae3378e 100644 --- a/openpype/hosts/max/plugins/load/load_model.py +++ b/openpype/hosts/max/plugins/load/load_model.py @@ -1,8 +1,5 @@ - import os -from openpype.pipeline import ( - load, get_representation_path -) +from openpype.pipeline import load, get_representation_path from openpype.hosts.max.api.pipeline import containerise from openpype.hosts.max.api import lib from openpype.hosts.max.api.lib import maintained_selection @@ -24,24 +21,20 @@ class ModelAbcLoader(load.LoaderPlugin): file_path = os.path.normpath(self.fname) abc_before = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } - abc_import_cmd = (f""" -AlembicImport.ImportToRoot = false -AlembicImport.CustomAttributes = true -AlembicImport.UVs = true -AlembicImport.VertexColors = true - -importFile @"{file_path}" #noPrompt - """) - - self.log.debug(f"Executing command: {abc_import_cmd}") - rt.execute(abc_import_cmd) + rt.AlembicImport.ImportToRoot = False + rt.AlembicImport.CustomAttributes = True + 
rt.AlembicImport.UVs = True + rt.AlembicImport.VertexColors = True + rt.importFile(file_path, rt.name("noPrompt")) abc_after = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } @@ -54,10 +47,12 @@ importFile @"{file_path}" #noPrompt abc_container = abc_containers.pop() return containerise( - name, [abc_container], context, loader=self.__class__.__name__) + name, [abc_container], context, loader=self.__class__.__name__ + ) def update(self, container, representation): from pymxs import runtime as rt + path = get_representation_path(representation) node = rt.getNodeByName(container["instance_node"]) rt.select(node.Children) @@ -76,9 +71,10 @@ importFile @"{file_path}" #noPrompt with maintained_selection(): rt.select(node) - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/openpype/hosts/max/plugins/load/load_model_fbx.py index 01e6acae12..61101c482d 100644 --- a/openpype/hosts/max/plugins/load/load_model_fbx.py +++ b/openpype/hosts/max/plugins/load/load_model_fbx.py @@ -1,8 +1,5 @@ import os -from openpype.pipeline import ( - load, - get_representation_path -) +from openpype.pipeline import load, get_representation_path from openpype.hosts.max.api.pipeline import containerise from openpype.hosts.max.api import lib from openpype.hosts.max.api.lib import maintained_selection @@ -24,10 +21,7 @@ class FbxModelLoader(load.LoaderPlugin): rt.FBXImporterSetParam("Animation", False) rt.FBXImporterSetParam("Cameras", False) rt.FBXImporterSetParam("Preserveinstances", True) - rt.importFile( - filepath, - rt.name("noPrompt"), - using=rt.FBXIMP) + rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP) container = rt.getNodeByName(f"{name}") if not container: @@ -38,7 +32,8 @@ class FbxModelLoader(load.LoaderPlugin): selection.Parent = container return containerise( - name, [container], context, loader=self.__class__.__name__) + name, [container], context, loader=self.__class__.__name__ + ) def update(self, container, representation): from pymxs import runtime as rt @@ -46,24 +41,21 @@ class FbxModelLoader(load.LoaderPlugin): path = get_representation_path(representation) node = rt.getNodeByName(container["instance_node"]) rt.select(node.Children) - fbx_reimport_cmd = ( - f""" -FBXImporterSetParam "Animation" false -FBXImporterSetParam "Cameras" false -FBXImporterSetParam "AxisConversionMethod" true -FbxExporterSetParam "UpAxis" "Y" -FbxExporterSetParam "Preserveinstances" true -importFile @"{path}" #noPrompt using:FBXIMP - """) - rt.execute(fbx_reimport_cmd) + rt.FBXImporterSetParam("Animation", False) + rt.FBXImporterSetParam("Cameras", False) + rt.FBXImporterSetParam("AxisConversionMethod", True) + rt.FBXImporterSetParam("UpAxis", "Y") + rt.FBXImporterSetParam("Preserveinstances", True) + rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP) with maintained_selection(): rt.select(node) - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py 
b/openpype/hosts/max/plugins/load/load_pointcache.py index b3e12adc7b..5fb9772f87 100644 --- a/openpype/hosts/max/plugins/load/load_pointcache.py +++ b/openpype/hosts/max/plugins/load/load_pointcache.py @@ -5,9 +5,7 @@ Because of limited api, alembics can be only loaded, but not easily updated. """ import os -from openpype.pipeline import ( - load, get_representation_path -) +from openpype.pipeline import load, get_representation_path from openpype.hosts.max.api.pipeline import containerise from openpype.hosts.max.api import lib @@ -15,9 +13,7 @@ from openpype.hosts.max.api import lib class AbcLoader(load.LoaderPlugin): """Alembic loader.""" - families = ["camera", - "animation", - "pointcache"] + families = ["camera", "animation", "pointcache"] label = "Load Alembic" representations = ["abc"] order = -10 @@ -30,21 +26,17 @@ class AbcLoader(load.LoaderPlugin): file_path = os.path.normpath(self.fname) abc_before = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } - abc_export_cmd = (f""" -AlembicImport.ImportToRoot = false - -importFile @"{file_path}" #noPrompt - """) - - self.log.debug(f"Executing command: {abc_export_cmd}") - rt.execute(abc_export_cmd) + rt.AlembicImport.ImportToRoot = False + rt.importFile(file_path, rt.name("noPrompt")) abc_after = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } @@ -57,7 +49,8 @@ importFile @"{file_path}" #noPrompt abc_container = abc_containers.pop() return containerise( - name, [abc_container], context, loader=self.__class__.__name__) + name, [abc_container], context, loader=self.__class__.__name__ + ) def update(self, container, representation): from pymxs import runtime as rt @@ -69,9 +62,10 @@ importFile @"{file_path}" #noPrompt for alembic_object in alembic_objects: alembic_object.source = path - lib.imprint(container["instance_node"], { - "representation": str(representation["_id"]) - }) + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 387b7321b9..4681175808 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -181,16 +181,34 @@ class CreateRender(plugin.Creator): primary_pool = pool_setting["primary_pool"] sorted_pools = self._set_default_pool(list(pools), primary_pool) - cmds.addAttr(self.instance, longName="primaryPool", - attributeType="enum", - enumName=":".join(sorted_pools)) + cmds.addAttr( + self.instance, + longName="primaryPool", + attributeType="enum", + enumName=":".join(sorted_pools) + ) + cmds.setAttr( + "{}.primaryPool".format(self.instance), + 0, + keyable=False, + channelBox=True + ) pools = ["-"] + pools secondary_pool = pool_setting["secondary_pool"] sorted_pools = self._set_default_pool(list(pools), secondary_pool) - cmds.addAttr("{}.secondaryPool".format(self.instance), - attributeType="enum", - enumName=":".join(sorted_pools)) + cmds.addAttr( + self.instance, + longName="secondaryPool", + attributeType="enum", + enumName=":".join(sorted_pools) + ) + cmds.setAttr( + "{}.secondaryPool".format(self.instance), + 0, + keyable=False, + channelBox=True + ) def _create_render_settings(self): """Create instance settings.""" @@ -260,6 +278,12 @@ class CreateRender(plugin.Creator): 
default_priority) self.data["tile_priority"] = tile_priority + strict_error_checking = maya_submit_dl.get("strict_error_checking", + True) + self.data["strict_error_checking"] = strict_error_checking + + # Pool attributes should be last since they will be recreated when + # the deadline server changes. pool_setting = (self._project_settings["deadline"] ["publish"] ["CollectDeadlinePools"]) @@ -272,9 +296,6 @@ class CreateRender(plugin.Creator): secondary_pool = pool_setting["secondary_pool"] self.data["secondaryPool"] = self._set_default_pool(pool_names, secondary_pool) - strict_error_checking = maya_submit_dl.get("strict_error_checking", - True) - self.data["strict_error_checking"] = strict_error_checking if muster_enabled: self.log.info(">>> Loading Muster credentials ...") diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index f4a4a44344..74ca27ff3c 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -33,7 +33,7 @@ def preserve_modelpanel_cameras(container, log=None): panel_cameras = {} for panel in cmds.getPanel(type="modelPanel"): cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True), - long=True) + long=True)[0] # Often but not always maya returns the transform from the # modelPanel as opposed to the camera shape, so we convert it diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 0845f653b1..f160a3a0c5 100644 --- a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -35,13 +35,16 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin): # camera. 
cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] - camera = renderable[0] - for node in instance.data["contentMembers"]: - camera_shapes = cmds.listRelatives( - node, shapes=True, type="camera" - ) - if camera_shapes: - camera = node - instance.data["camera"] = camera + if renderable: + camera = renderable[0] + for node in instance.data["contentMembers"]: + camera_shapes = cmds.listRelatives( + node, shapes=True, type="camera" + ) + if camera_shapes: + camera = node + instance.data["camera"] = camera + else: + self.log.debug("No renderable cameras found.") self.log.debug("data: {}".format(instance.data)) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 7c47f17acb..babd494758 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -336,7 +336,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): context.data["system_settings"]["modules"]["deadline"] ) if deadline_settings["enabled"]: - data["deadlineUrl"] = render_instance.data.get("deadlineUrl") + data["deadlineUrl"] = render_instance.data["deadlineUrl"] if self.sync_workfile_version: data["version"] = context.data["version"] diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py index e27723e104..8ce76c8d04 100644 --- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py @@ -70,5 +70,5 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin): @classmethod def repair(cls, instance): - for content_node, proxy_node in cls.get_invalid_couples(cls, instance): - lib.set_id(proxy_node, lib.get_id(content_node), overwrite=False) + for content_node, proxy_node in cls.get_invalid_couples(instance): + lib.set_id(proxy_node, lib.get_id(content_node), overwrite=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index ebf7b3138d..a5d5ab0c9e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -364,6 +364,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cmds.setAttr("defaultRenderGlobals.animation", True) # Repair prefix + if renderer == "arnold": + multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs") + if multipart: + separator_variations = [ + "_", + "_", + "", + ] + for variant in separator_variations: + default_prefix = default_prefix.replace(variant, "") + if renderer != "renderman": node = render_attrs["node"] prefix_attr = render_attrs["prefix"] diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index f341a78ad5..912bc5963b 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -1403,8 +1403,6 @@ def create_write_node( # adding write to read button add_button_clear_rendered(GN, os.path.dirname(fpath)) - GN.addKnob(nuke.Text_Knob('', '')) - # set tile color tile_color = next( iter( diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index 75b0f80d21..88f7144542 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -564,6 +564,7 @@ def remove_instance(instance): instance_node = 
instance.transient_data["node"] instance_knob = instance_node.knobs()[INSTANCE_DATA_KNOB] instance_node.removeKnob(instance_knob) + nuke.delete(instance_node) def select_instance(instance): diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 3566cb64c1..7035da2bb5 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -75,20 +75,6 @@ class NukeCreator(NewCreator): for pass_key in keys: creator_attrs[pass_key] = pre_create_data[pass_key] - def add_info_knob(self, node): - if "OP_info" in node.knobs().keys(): - return - - # add info text - info_knob = nuke.Text_Knob("OP_info", "") - info_knob.setValue(""" - -

This node is maintained by OpenPype Publisher.

-

To remove it use Publisher gui.

-
- """) - node.addKnob(info_knob) - def check_existing_subset(self, subset_name): """Make sure subset name is unique. @@ -153,8 +139,6 @@ class NukeCreator(NewCreator): created_node = nuke.createNode(node_type) created_node["name"].setValue(node_name) - self.add_info_knob(created_node) - for key, values in node_knobs.items(): if key in created_node.knobs(): created_node["key"].setValue(values) diff --git a/openpype/hosts/nuke/plugins/create/create_backdrop.py b/openpype/hosts/nuke/plugins/create/create_backdrop.py index ff415626be..52959bbef2 100644 --- a/openpype/hosts/nuke/plugins/create/create_backdrop.py +++ b/openpype/hosts/nuke/plugins/create/create_backdrop.py @@ -36,8 +36,6 @@ class CreateBackdrop(NukeCreator): created_node["note_font_size"].setValue(24) created_node["label"].setValue("[{}]".format(node_name)) - self.add_info_knob(created_node) - return created_node def create(self, subset_name, instance_data, pre_create_data): diff --git a/openpype/hosts/nuke/plugins/create/create_camera.py b/openpype/hosts/nuke/plugins/create/create_camera.py index 5553645af6..b84280b11b 100644 --- a/openpype/hosts/nuke/plugins/create/create_camera.py +++ b/openpype/hosts/nuke/plugins/create/create_camera.py @@ -39,8 +39,6 @@ class CreateCamera(NukeCreator): created_node["name"].setValue(node_name) - self.add_info_knob(created_node) - return created_node def create(self, subset_name, instance_data, pre_create_data): diff --git a/openpype/hosts/nuke/plugins/create/create_gizmo.py b/openpype/hosts/nuke/plugins/create/create_gizmo.py index e3ce70dd59..cbe2f635c9 100644 --- a/openpype/hosts/nuke/plugins/create/create_gizmo.py +++ b/openpype/hosts/nuke/plugins/create/create_gizmo.py @@ -40,8 +40,6 @@ class CreateGizmo(NukeCreator): created_node["name"].setValue(node_name) - self.add_info_knob(created_node) - return created_node def create(self, subset_name, instance_data, pre_create_data): diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py index 08a53abca2..a94c9f0313 100644 --- a/openpype/hosts/nuke/plugins/create/create_model.py +++ b/openpype/hosts/nuke/plugins/create/create_model.py @@ -40,8 +40,6 @@ class CreateModel(NukeCreator): created_node["name"].setValue(node_name) - self.add_info_knob(created_node) - return created_node def create(self, subset_name, instance_data, pre_create_data): diff --git a/openpype/hosts/nuke/plugins/create/create_source.py b/openpype/hosts/nuke/plugins/create/create_source.py index 57504b5d53..8419c3ef33 100644 --- a/openpype/hosts/nuke/plugins/create/create_source.py +++ b/openpype/hosts/nuke/plugins/create/create_source.py @@ -32,7 +32,7 @@ class CreateSource(NukeCreator): read_node["tile_color"].setValue( int(self.node_color, 16)) read_node["name"].setValue(node_name) - self.add_info_knob(read_node) + return read_node def create(self, subset_name, instance_data, pre_create_data): diff --git a/openpype/hosts/nuke/plugins/create/create_write_image.py b/openpype/hosts/nuke/plugins/create/create_write_image.py index b74cea5dae..0c8adfb75c 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_image.py +++ b/openpype/hosts/nuke/plugins/create/create_write_image.py @@ -86,7 +86,6 @@ class CreateWriteImage(napi.NukeWriteCreator): "frame": nuke.frame() } ) - self.add_info_knob(created_node) self._add_frame_range_limit(created_node, instance_data) diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 
387768b1dd..f46dd2d6d5 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -74,7 +74,6 @@ class CreateWritePrerender(napi.NukeWriteCreator): "height": height } ) - self.add_info_knob(created_node) self._add_frame_range_limit(created_node) diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index 09257f662e..c24405873a 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -66,7 +66,6 @@ class CreateWriteRender(napi.NukeWriteCreator): "height": height } ) - self.add_info_knob(created_node) self.integrate_links(created_node, outputs=False) diff --git a/openpype/hosts/nuke/startup/__init__.py b/openpype/hosts/nuke/startup/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/nuke/startup/custom_write_node.py b/openpype/hosts/nuke/startup/custom_write_node.py new file mode 100644 index 0000000000..d9313231d8 --- /dev/null +++ b/openpype/hosts/nuke/startup/custom_write_node.py @@ -0,0 +1,76 @@ +import os +import nuke +from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings + + +frame_padding = 5 +temp_rendering_path_template = ( + "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}") + +knobs_setting = { + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": True + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 186, + 35, + 35, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "bool", + "name": "create_directories", + "value": True + } + ] +} + + +def main(): + write_selected_nodes = [ + s for s in nuke.selectedNodes() if s.Class() == "Write"] + + ext = None + knobs = knobs_setting["knobs"] + for knob in knobs: + if knob["name"] == "file_type": + ext = knob["value"] + for w in write_selected_nodes: + # data for mapping the path + data = { + "work": os.getenv("AVALON_WORKDIR"), + "subset": w["name"].value(), + "frame": "#" * frame_padding, + "ext": ext + } + file_path = temp_rendering_path_template.format(**data) + file_path = file_path.replace("\\", "/") + w["file"].setValue(file_path) + set_node_knobs_from_settings(w, knobs) diff --git a/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py new file mode 100644 index 0000000000..f0cbabe20f --- /dev/null +++ b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py @@ -0,0 +1,47 @@ +""" OpenPype custom script for resetting read nodes start frame values """ + +import nuke +import nukescripts + + +class FrameSettingsPanel(nukescripts.PythonPanel): + """ Frame Settings Panel """ + def __init__(self): + nukescripts.PythonPanel.__init__(self, "Set Frame Start (Read Node)") + + # create knobs + self.frame = nuke.Int_Knob( + 'frame', 'Frame Number') + self.selected = nuke.Boolean_Knob("selection") + # add knobs to panel + self.addKnob(self.selected) + self.addKnob(self.frame) + + # set values + self.selected.setValue(False) + self.frame.setValue(nuke.root().firstFrame()) + + def process(self): + """ Process the panel values. 
""" + # get values + frame = self.frame.value() + if self.selected.value(): + # selected nodes processing + if not nuke.selectedNodes(): + return + for rn_ in nuke.selectedNodes(): + if rn_.Class() != "Read": + continue + rn_["frame_mode"].setValue("start_at") + rn_["frame"].setValue(str(frame)) + else: + # all nodes processing + for rn_ in nuke.allNodes(filter="Read"): + rn_["frame_mode"].setValue("start_at") + rn_["frame"].setValue(str(frame)) + + +def main(): + p_ = FrameSettingsPanel() + if p_.showModalDialog(): + print(p_.process()) diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py index 5ce73eea53..5966fa6a43 100644 --- a/openpype/hosts/resolve/api/workio.py +++ b/openpype/hosts/resolve/api/workio.py @@ -43,18 +43,22 @@ def open_file(filepath): """ Loading project """ + + from . import bmdvr + pm = get_project_manager() + page = bmdvr.GetCurrentPage() + if page is not None: + # Save current project only if Resolve has an active page, otherwise + # we consider Resolve being in a pre-launch state (no open UI yet) + project = pm.GetCurrentProject() + print(f"Saving current project: {project}") + pm.SaveProject() + file = os.path.basename(filepath) fname, _ = os.path.splitext(file) dname, _ = fname.split("_v") - - # deal with current project - project = pm.GetCurrentProject() - log.info(f"Test `pm`: {pm}") - pm.SaveProject() - try: - log.info(f"Test `dname`: {dname}") if not set_project_manager_to_folder_name(dname): raise # load project from input path @@ -72,6 +76,7 @@ def open_file(filepath): return False return True + def current_file(): pm = get_project_manager() current_dir = os.getenv("AVALON_WORKDIR") diff --git a/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py new file mode 100644 index 0000000000..0e27ddb8c3 --- /dev/null +++ b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py @@ -0,0 +1,45 @@ +import os + +from openpype.lib import PreLaunchHook +import openpype.hosts.resolve + + +class ResolveLaunchLastWorkfile(PreLaunchHook): + """Special hook to open last workfile for Resolve. + + Checks 'start_last_workfile', if set to False, it will not open last + workfile. This property is set explicitly in Launcher. 
+ """ + + # Execute after workfile template copy + order = 10 + app_groups = ["resolve"] + + def execute(self): + if not self.data.get("start_last_workfile"): + self.log.info("It is set to not start last workfile on start.") + return + + last_workfile = self.data.get("last_workfile_path") + if not last_workfile: + self.log.warning("Last workfile was not collected.") + return + + if not os.path.exists(last_workfile): + self.log.info("Current context does not have any workfile yet.") + return + + # Add path to launch environment for the startup script to pick up + self.log.info(f"Setting OPENPYPE_RESOLVE_OPEN_ON_LAUNCH to launch " + f"last workfile: {last_workfile}") + key = "OPENPYPE_RESOLVE_OPEN_ON_LAUNCH" + self.launch_context.env[key] = last_workfile + + # Set the openpype prelaunch startup script path for easy access + # in the LUA .scriptlib code + op_resolve_root = os.path.dirname(openpype.hosts.resolve.__file__) + script_path = os.path.join(op_resolve_root, "startup.py") + key = "OPENPYPE_RESOLVE_STARTUP_SCRIPT" + self.launch_context.env[key] = script_path + self.log.info("Setting OPENPYPE_RESOLVE_STARTUP_SCRIPT to: " + f"{script_path}") diff --git a/openpype/hosts/resolve/startup.py b/openpype/hosts/resolve/startup.py new file mode 100644 index 0000000000..79a64e0fbf --- /dev/null +++ b/openpype/hosts/resolve/startup.py @@ -0,0 +1,62 @@ +"""This script is used as a startup script in Resolve through a .scriptlib file + +It triggers directly after the launch of Resolve and it's recommended to keep +it optimized for fast performance since the Resolve UI is actually interactive +while this is running. As such, there's nothing ensuring the user isn't +continuing manually before any of the logic here runs. As such we also try +to delay any imports as much as possible. + +This code runs in a separate process to the main Resolve process. + +""" +import os + +import openpype.hosts.resolve.api + + +def ensure_installed_host(): + """Install resolve host with openpype and return the registered host. + + This function can be called multiple times without triggering an + additional install. + """ + from openpype.pipeline import install_host, registered_host + host = registered_host() + if host: + return host + + install_host(openpype.hosts.resolve.api) + return registered_host() + + +def launch_menu(): + print("Launching Resolve OpenPype menu..") + ensure_installed_host() + openpype.hosts.resolve.api.launch_pype_menu() + + +def open_file(path): + # Avoid the need to "install" the host + host = ensure_installed_host() + host.open_file(path) + + +def main(): + # Open last workfile + workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH") + if workfile_path: + open_file(workfile_path) + else: + print("No last workfile set to open. 
Skipping..") + + # Launch OpenPype menu + from openpype.settings import get_project_settings + from openpype.pipeline.context_tools import get_current_project_name + project_name = get_current_project_name() + settings = get_project_settings(project_name) + if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True): + launch_menu() + + +if __name__ == "__main__": + main() diff --git a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib new file mode 100644 index 0000000000..ec9b30a18d --- /dev/null +++ b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib @@ -0,0 +1,21 @@ +-- Run OpenPype's Python launch script for resolve +function file_exists(name) + local f = io.open(name, "r") + return f ~= nil and io.close(f) +end + + +openpype_startup_script = os.getenv("OPENPYPE_RESOLVE_STARTUP_SCRIPT") +if openpype_startup_script ~= nil then + script = fusion:MapPath(openpype_startup_script) + + if file_exists(script) then + -- We must use RunScript to ensure it runs in a separate + -- process to Resolve itself to avoid a deadlock for + -- certain imports of OpenPype libraries or Qt + print("Running launch script: " .. script) + fusion:RunScript(script) + else + print("Launch script not found at: " .. script) + end +end \ No newline at end of file diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py index 1213fd9e7a..5e3003862f 100644 --- a/openpype/hosts/resolve/utils.py +++ b/openpype/hosts/resolve/utils.py @@ -53,6 +53,14 @@ def setup(env): src = os.path.join(directory, script) dst = os.path.join(util_scripts_dir, script) + + # TODO: Make this a less hacky workaround + if script == "openpype_startup.scriptlib": + # Handle special case for scriptlib that needs to be a folder + # up from the Comp folder in the Fusion scripts + dst = os.path.join(os.path.dirname(util_scripts_dir), + script) + log.info("Copying `{}` to `{}`...".format(src, dst)) if os.path.isdir(src): shutil.copytree( diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py index 0630dfb3da..8640500b18 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_editorial.py +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -487,7 +487,22 @@ or updating already created. Publishing will create OTIO file. ) # get video stream data - video_stream = media_data["streams"][0] + video_streams = [] + audio_streams = [] + for stream in media_data["streams"]: + codec_type = stream.get("codec_type") + if codec_type == "audio": + audio_streams.append(stream) + + elif codec_type == "video": + video_streams.append(stream) + + if not video_streams: + raise ValueError( + "Could not find video stream in source file." + ) + + video_stream = video_streams[0] return_data = { "video": True, "start_frame": 0, @@ -500,12 +515,7 @@ or updating already created. Publishing will create OTIO file. 
} # get audio streams data - audio_stream = [ - stream for stream in media_data["streams"] - if stream["codec_type"] == "audio" - ] - - if audio_stream: + if audio_streams: return_data["audio"] = True except Exception as exc: diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 07107ec011..674eaa3b91 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -113,26 +113,29 @@ def pack_project( project_name )) - roots = project_doc["config"]["roots"] - # Determine root directory of project - source_root = None - source_root_name = None - for root_name, root_value in roots.items(): - if source_root is not None: - raise ValueError( - "Packaging is supported only for single root projects" - ) - source_root = root_value - source_root_name = root_name + root_path = None + source_root = {} + project_source_path = None + if not only_documents: + roots = project_doc["config"]["roots"] + # Determine root directory of project + source_root_name = None + for root_name, root_value in roots.items(): + if source_root is not None: + raise ValueError( + "Packaging is supported only for single root projects" + ) + source_root = root_value + source_root_name = root_name - root_path = source_root[platform.system().lower()] - print("Using root \"{}\" with path \"{}\"".format( - source_root_name, root_path - )) + root_path = source_root[platform.system().lower()] + print("Using root \"{}\" with path \"{}\"".format( + source_root_name, root_path + )) - project_source_path = os.path.join(root_path, project_name) - if not os.path.exists(project_source_path): - raise ValueError("Didn't find source of project files") + project_source_path = os.path.join(root_path, project_name) + if not os.path.exists(project_source_path): + raise ValueError("Didn't find source of project files") # Determine zip filepath where data will be stored if not destination_dir: @@ -273,8 +276,7 @@ def unpack_project( low_platform = platform.system().lower() project_name = metadata["project_name"] - source_root = metadata["root"] - root_path = source_root[low_platform] + root_path = metadata["root"].get(low_platform) # Drop existing collection replace_project_documents(project_name, docs, database_name) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 7938c27233..e3e94d50cd 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -662,7 +662,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): # test if there is instance of workfile waiting # to be published. - assert i.data["publish"] is True, ( + assert i.data.get("publish", True) is True, ( "Workfile (scene) must be published along") return i diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 9981bead3e..2de6073e29 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -5,23 +5,26 @@ This is resolving index of server lists stored in `deadlineServers` instance attribute or using default server if that attribute doesn't exists. 
""" +from maya import cmds + import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.415 + # Run before collect_render. + order = pyblish.api.CollectorOrder + 0.005 label = "Deadline Webservice from the Instance" families = ["rendering", "renderlayer"] + hosts = ["maya"] def process(self, instance): instance.data["deadlineUrl"] = self._collect_deadline_url(instance) self.log.info( "Using {} for submission.".format(instance.data["deadlineUrl"])) - @staticmethod - def _collect_deadline_url(render_instance): + def _collect_deadline_url(self, render_instance): # type: (pyblish.api.Instance) -> str """Get Deadline Webservice URL from render instance. @@ -49,8 +52,16 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): default_server = render_instance.context.data["defaultDeadline"] instance_server = render_instance.data.get("deadlineServers") if not instance_server: + self.log.debug("Using default server.") return default_server + # Get instance server as sting. + if isinstance(instance_server, int): + instance_server = cmds.getAttr( + "{}.deadlineServers".format(render_instance.data["objset"]), + asString=True + ) + default_servers = deadline_settings["deadline_urls"] project_servers = ( render_instance.context.data @@ -58,15 +69,23 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): ["deadline"] ["deadline_servers"] ) - deadline_servers = { + if not project_servers: + self.log.debug("Not project servers found. Using default servers.") + return default_servers[instance_server] + + project_enabled_servers = { k: default_servers[k] for k in project_servers if k in default_servers } - # This is Maya specific and may not reflect real selection of deadline - # url as dictionary keys in Python 2 are not ordered - return deadline_servers[ - list(deadline_servers.keys())[ - int(render_instance.data.get("deadlineServers")) - ] - ] + + msg = ( + "\"{}\" server on instance is not enabled in project settings." + " Enabled project servers:\n{}".format( + instance_server, project_enabled_servers + ) + ) + assert instance_server in project_enabled_servers, msg + + self.log.debug("Using project approved server.") + return project_enabled_servers[instance_server] diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py index cb2b0cf156..1a0d615dc3 100644 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -17,7 +17,8 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): `CollectDeadlineServerFromInstance`. """ - order = pyblish.api.CollectorOrder + 0.410 + # Run before collect_deadline_server_instance. 
+ order = pyblish.api.CollectorOrder + 0.0025 label = "Default Deadline Webservice" pass_mongo_url = False diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 73ab689c9a..254914a850 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -1,19 +1,27 @@ +import hou + import os -import json +import attr import getpass from datetime import datetime - -import requests import pyblish.api -# import hou ??? - from openpype.pipeline import legacy_io from openpype.tests.lib import is_in_tests +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from openpype.lib import is_running_from_build -class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): +@attr.s +class DeadlinePluginInfo(): + SceneFile = attr.ib(default=None) + OutputDriver = attr.ib(default=None) + Version = attr.ib(default=None) + IgnoreInputs = attr.ib(default=True) + + +class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): """Submit Solaris USD Render ROPs to Deadline. Renders are submitted to a Deadline Web Service as @@ -30,83 +38,57 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder hosts = ["houdini"] families = ["usdrender", - "redshift_rop"] + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "vray_rop"] targets = ["local"] + use_published = True - def process(self, instance): + def get_job_info(self): + job_info = DeadlineJobInfo(Plugin="Houdini") + instance = self._instance context = instance.context - code = context.data["code"] + filepath = context.data["currentFile"] filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - deadline_user = context.data.get("deadlineUser", getpass.getuser()) - jobname = "%s - %s" % (filename, instance.name) - # Support code prefix label for batch name - batch_name = filename - if code: - batch_name = "{0} - {1}".format(code, batch_name) + job_info.Name = "{} - {}".format(filename, instance.name) + job_info.BatchName = filename + job_info.Plugin = "Houdini" + job_info.UserName = context.data.get( + "deadlineUser", getpass.getuser()) if is_in_tests(): - batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S") - # Output driver to render - driver = instance[0] - - # StartFrame to EndFrame by byFrameStep + # Deadline requires integers in frame range frames = "{start}-{end}x{step}".format( start=int(instance.data["frameStart"]), end=int(instance.data["frameEnd"]), step=int(instance.data["byFrameStep"]), ) + job_info.Frames = frames - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options - payload = { - "JobInfo": { - # Top-level group name - "BatchName": batch_name, + job_info.Pool = instance.data.get("primaryPool") + job_info.SecondaryPool = instance.data.get("secondaryPool") + job_info.ChunkSize = instance.data.get("chunkSize", 10) + job_info.Comment = context.data.get("comment") - # Job name, as seen in Monitor - "Name": jobname, - - # Arbitrary username, for visualisation in Monitor - "UserName": deadline_user, - - "Plugin": "Houdini", - "Pool": 
instance.data.get("primaryPool"), - "secondaryPool": instance.data.get("secondaryPool"), - "Frames": frames, - - "ChunkSize": instance.data.get("chunkSize", 10), - - "Comment": comment - }, - "PluginInfo": { - # Input - "SceneFile": filepath, - "OutputDriver": driver.path(), - - # Mandatory for Deadline - # Houdini version without patch number - "Version": hou.applicationVersionString().rsplit(".", 1)[0], - - "IgnoreInputs": True - }, - - # Mandatory for Deadline, may be empty - "AuxFiles": [] - } - - # Include critical environment variables with submission + api.Session keys = [ - # Submit along the current Avalon tool setup that we launched - # this application with so the Render Slave can build its own - # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9" - "AVALON_TOOLS" + "FTRACK_API_KEY", + "FTRACK_API_USER", + "FTRACK_SERVER", + "OPENPYPE_SG_USER", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "AVALON_APP_NAME", + "OPENPYPE_DEV", + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION" ] # Add OpenPype version if we are running from build. @@ -114,61 +96,50 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): keys.append("OPENPYPE_VERSION") # Add mongo url if it's enabled - if context.data.get("deadlinePassMongoUrl"): + if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) + for key in keys: + value = environment.get(key) + if value: + job_info.EnvironmentKeyValue[key] = value - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" - # Include OutputFilename entries - # The first entry also enables double-click to preview rendered - # frames from Deadline Monitor - output_data = {} for i, filepath in enumerate(instance.data["files"]): dirname = os.path.dirname(filepath) fname = os.path.basename(filepath) - output_data["OutputDirectory%d" % i] = dirname.replace("\\", "/") - output_data["OutputFilename%d" % i] = fname + job_info.OutputDirectory += dirname.replace("\\", "/") + job_info.OutputFilename += fname - # For now ensure destination folder exists otherwise HUSK - # will fail to render the output image. 
This is supposedly fixed - # in new production builds of Houdini - # TODO Remove this workaround with Houdini 18.0.391+ - if not os.path.exists(dirname): - self.log.info("Ensuring output directory exists: %s" % - dirname) - os.makedirs(dirname) + return job_info - payload["JobInfo"].update(output_data) + def get_plugin_info(self): - self.submit(instance, payload) + instance = self._instance + context = instance.context - def submit(self, instance, payload): + # Output driver to render + driver = hou.node(instance.data["instance_node"]) + hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0] - AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE", - "http://localhost:8082") - assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" + plugin_info = DeadlinePluginInfo( + SceneFile=context.data["currentFile"], + OutputDriver=driver.path(), + Version=hou_major_minor, + IgnoreInputs=True + ) - plugin = payload["JobInfo"]["Plugin"] - self.log.info("Using Render Plugin : {}".format(plugin)) + return attr.asdict(plugin_info) - self.log.info("Submitting..") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(AVALON_DEADLINE) - response = requests.post(url, json=payload) - if not response.ok: - raise Exception(response.text) + def process(self, instance): + super(HoudiniSubmitDeadline, self).process(instance) + # TODO: Avoid the need for this logic here, needed for submit publish # Store output dir for unified publisher (filesequence) output_dir = os.path.dirname(instance.data["files"][0]) instance.data["outputDir"] = output_dir - instance.data["deadlineSubmissionJob"] = response.json() + instance.data["toBeRenderedOn"] = "deadline" diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index f646551a07..590acf86c2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -118,11 +118,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): deadline_plugin = "OpenPype" targets = ["local"] - hosts = ["fusion", "max", "maya", "nuke", + hosts = ["fusion", "max", "maya", "nuke", "houdini", "celaction", "aftereffects", "harmony"] families = ["render.farm", "prerender.farm", - "renderlayer", "imagesequence", "maxrender", "vrayscene"] + "renderlayer", "imagesequence", + "vrayscene", "maxrender", + "arnold_rop", "mantra_rop", + "karma_rop", "vray_rop", + "redshift_rop"] aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE @@ -140,7 +144,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_SERVER", "AVALON_APP_NAME", "OPENPYPE_USERNAME", - "OPENPYPE_SG_USER", + "OPENPYPE_VERSION", + "OPENPYPE_SG_USER" ] # Add OpenPype version if we are running from build. @@ -820,7 +825,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ).format(source)) family = "render" - if "prerender" in instance.data["families"]: + if ("prerender" in instance.data["families"] or + "prerender.farm" in instance.data["families"]): family = "prerender" families = [family] @@ -1089,6 +1095,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): deadline_publish_job_id = \ self._submit_deadline_post_job(instance, render_job, instances) + # Inject deadline url to instances. 
+ for inst in instances: + inst["deadlineUrl"] = self.deadline_url + # publish job file publish_job = { "asset": asset, diff --git a/openpype/plugins/publish/collect_frames_fix.py b/openpype/plugins/publish/collect_frames_fix.py index 837738eb06..86e727b053 100644 --- a/openpype/plugins/publish/collect_frames_fix.py +++ b/openpype/plugins/publish/collect_frames_fix.py @@ -35,41 +35,47 @@ class CollectFramesFixDef( rewrite_version = attribute_values.get("rewrite_version") - if frames_to_fix: - instance.data["frames_to_fix"] = frames_to_fix + if not frames_to_fix: + return - subset_name = instance.data["subset"] - asset_name = instance.data["asset"] + instance.data["frames_to_fix"] = frames_to_fix - project_entity = instance.data["projectEntity"] - project_name = project_entity["name"] + subset_name = instance.data["subset"] + asset_name = instance.data["asset"] - version = get_last_version_by_subset_name(project_name, - subset_name, - asset_name=asset_name) - if not version: - self.log.warning("No last version found, " - "re-render not possible") - return + project_entity = instance.data["projectEntity"] + project_name = project_entity["name"] - representations = get_representations(project_name, - version_ids=[version["_id"]]) - published_files = [] - for repre in representations: - if repre["context"]["family"] not in self.families: - continue + version = get_last_version_by_subset_name( + project_name, + subset_name, + asset_name=asset_name + ) + if not version: + self.log.warning( + "No last version found, re-render not possible" + ) + return - for file_info in repre.get("files"): - published_files.append(file_info["path"]) + representations = get_representations( + project_name, version_ids=[version["_id"]] + ) + published_files = [] + for repre in representations: + if repre["context"]["family"] not in self.families: + continue - instance.data["last_version_published_files"] = published_files - self.log.debug("last_version_published_files::{}".format( - instance.data["last_version_published_files"])) + for file_info in repre.get("files"): + published_files.append(file_info["path"]) - if rewrite_version: - instance.data["version"] = version["name"] - # limits triggering version validator - instance.data.pop("latestVersion") + instance.data["last_version_published_files"] = published_files + self.log.debug("last_version_published_files::{}".format( + instance.data["last_version_published_files"])) + + if self.rewrite_version_enable and rewrite_version: + instance.data["version"] = version["name"] + # limits triggering version validator + instance.data.pop("latestVersion") @classmethod def get_attribute_defs(cls): diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index cdfc236d5c..07c50efe8c 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -218,6 +218,20 @@ "title": "OpenPype Docs", "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_nuke_tut')", "tooltip": "Open the OpenPype Nuke user doc page" + }, + { + "type": "action", + "sourcetype": "python", + "title": "Set Frame Start (Read Node)", + "command": "from openpype.hosts.nuke.startup.frame_setting_for_read_nodes import main;main();", + "tooltip": "Set frame start for read node(s)" + }, + { + "type": "action", + "sourcetype": "python", + "title": "Set non publish output for Write Node", + "command": "from 
openpype.hosts.nuke.startup.custom_write_node import main;main();", + "tooltip": "Open the OpenPype Nuke user doc page" } ] }, diff --git a/openpype/settings/defaults/project_settings/resolve.json b/openpype/settings/defaults/project_settings/resolve.json index da47ae2553..95b3cc66b3 100644 --- a/openpype/settings/defaults/project_settings/resolve.json +++ b/openpype/settings/defaults/project_settings/resolve.json @@ -1,4 +1,5 @@ { + "launch_openpype_menu_on_start": false, "imageio": { "activate_host_color_management": true, "remapping": { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json index 2f10cc900a..650470850e 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json @@ -5,6 +5,11 @@ "label": "DaVinci Resolve", "is_file": true, "children": [ + { + "type": "boolean", + "key": "launch_openpype_menu_on_start", + "label": "Launch OpenPype menu on start of Resolve" + }, { "key": "imageio", "type": "dict", diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index 3aa6c5d8cb..63ffcc9365 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -273,7 +273,7 @@ class ActionModel(QtGui.QStandardItemModel): # Sort by order and name return sorted( compatible, - key=lambda action: (action.order, action.name) + key=lambda action: (action.order, lib.get_action_label(action)) ) def update_force_not_open_workfile_settings(self, is_checked, action_id): diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index e5d8400031..e58e02f89a 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -446,6 +446,7 @@ class SubsetsModel(BaseRepresentationModel, TreeModel): last_versions_by_subset_id = get_last_versions( project_name, subset_ids, + active=True, fields=["_id", "parent", "name", "type", "data", "schema"] ) diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py index 6ab444109e..006098cb37 100644 --- a/openpype/tools/publisher/window.py +++ b/openpype/tools/publisher/window.py @@ -66,8 +66,7 @@ class PublisherWindow(QtWidgets.QDialog): on_top_flag = QtCore.Qt.Dialog self.setWindowFlags( - self.windowFlags() - | QtCore.Qt.WindowTitleHint + QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowCloseButtonHint diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py index 2f3b5251f9..fdc0a8094d 100644 --- a/openpype/tools/tray/pype_tray.py +++ b/openpype/tools/tray/pype_tray.py @@ -633,10 +633,10 @@ class TrayManager: # Create a copy of sys.argv additional_args = list(sys.argv) - # Check last argument from `get_openpype_execute_args` - # - when running from code it is the same as first from sys.argv - if args[-1] == additional_args[0]: - additional_args.pop(0) + # Remove first argument from 'sys.argv' + # - when running from code the first argument is 'start.py' + # - when running from build the first argument is executable + additional_args.pop(0) cleanup_additional_args = False if use_expected_version: @@ -663,7 +663,6 @@ class TrayManager: additional_args = _additional_args args.extend(additional_args) - run_detached_process(args, env=envs) self.exit() diff --git a/openpype/tools/utils/delegates.py b/openpype/tools/utils/delegates.py 
index fa69113ef1..c71c87f9b0 100644 --- a/openpype/tools/utils/delegates.py +++ b/openpype/tools/utils/delegates.py @@ -123,10 +123,14 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate): project_name = self.dbcon.active_project() # Add all available versions to the editor parent_id = item["version_document"]["parent"] - version_docs = list(sorted( - get_versions(project_name, subset_ids=[parent_id]), - key=lambda item: item["name"] - )) + version_docs = [ + version_doc + for version_doc in sorted( + get_versions(project_name, subset_ids=[parent_id]), + key=lambda item: item["name"] + ) + if version_doc["data"].get("active", True) + ] hero_versions = list( get_hero_versions( diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py index 31ecf50d3b..53f8894665 100644 --- a/openpype/tools/workfiles/window.py +++ b/openpype/tools/workfiles/window.py @@ -1,6 +1,7 @@ import os import datetime import copy +import platform from qtpy import QtCore, QtWidgets, QtGui from openpype.client import ( @@ -94,6 +95,19 @@ class SidePanelWidget(QtWidgets.QWidget): self._on_note_change() self.save_clicked.emit() + def get_user_name(self, file): + """Get user name from file path""" + # Only run on Unix because pwd module is not available on Windows. + # NOTE: we tried adding "win32security" module but it was not working + # on all hosts so we decided to just support Linux until migration + # to Ayon + if platform.system().lower() == "windows": + return None + import pwd + + filestat = os.stat(file) + return pwd.getpwuid(filestat.st_uid).pw_name + def set_context(self, asset_id, task_name, filepath, workfile_doc): # Check if asset, task and file are selected # NOTE workfile document is not requirement @@ -134,8 +148,14 @@ class SidePanelWidget(QtWidgets.QWidget): "Created:", creation_time.strftime(datetime_format), "Modified:", - modification_time.strftime(datetime_format) + modification_time.strftime(datetime_format), ) + username = self.get_user_name(filepath) + if username: + lines += ( + "User:", + username, + ) self._details_input.appendHtml("
".join(lines)) def get_workfile_data(self): diff --git a/openpype/version.py b/openpype/version.py index dd23138dee..868664c601 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.15.9" +__version__ = "3.15.10-nightly.2" diff --git a/website/docs/artist_hosts_houdini.md b/website/docs/artist_hosts_houdini.md index 8874a0b5cf..0471765365 100644 --- a/website/docs/artist_hosts_houdini.md +++ b/website/docs/artist_hosts_houdini.md @@ -14,7 +14,7 @@ sidebar_label: Houdini - [Library Loader](artist_tools_library-loader) ## Publishing Alembic Cameras -You can publish baked camera in Alembic format. +You can publish baked camera in Alembic format. Select your camera and go **OpenPype -> Create** and select **Camera (abc)**. This will create Alembic ROP in **out** with path and frame range already set. This node will have a name you've @@ -30,7 +30,7 @@ You can use any COP node and publish the image sequence generated from it. For e ![Noise COP](assets/houdini_imagesequence_cop.png) To publish the output of the `radialblur1` go to **OpenPype -> Create** and -select **Composite (Image Sequence)**. If you name the variant *Noise* this will create the `/out/imagesequenceNoise` Composite ROP with the frame range set. +select **Composite (Image Sequence)**. If you name the variant *Noise* this will create the `/out/imagesequenceNoise` Composite ROP with the frame range set. When you hit **Publish** it will render image sequence from selected node. @@ -56,14 +56,14 @@ Now select the `output0` node and go **OpenPype -> Create** and select **Point C Alembic ROP `/out/pointcacheStrange` ## Publishing Reviews (OpenGL) -To generate a review output from Houdini you need to create a **review** instance. +To generate a review output from Houdini you need to create a **review** instance. Go to **OpenPype -> Create** and select **Review**. ![Houdini Create Review](assets/houdini_review_create_attrs.png) -On create, with the **Use Selection** checkbox enabled it will set up the first -camera found in your selection as the camera for the OpenGL ROP node and other -non-cameras are set in **Force Objects**. It will then render those even if +On create, with the **Use Selection** checkbox enabled it will set up the first +camera found in your selection as the camera for the OpenGL ROP node and other +non-cameras are set in **Force Objects**. It will then render those even if their display flag is disabled in your scene. ## Redshift @@ -71,6 +71,18 @@ their display flag is disabled in your scene. This part of documentation is still work in progress. ::: +## Publishing Render to Deadline +Five Renderers(Arnold, Redshift, Mantra, Karma, VRay) are supported for Render Publishing. +They are named with the suffix("_ROP") +To submit render to deadline, you need to create a **Render** instance. +Go to **Openpype -> Create** and select **Publish**. Before clicking **Create** button, +you need select your preferred image rendering format. You can also enable the **Use selection** to +select your render camera. +![Houdini Create Render](assets/houdini_render_publish_creator.png) + +All the render outputs are stored in the pyblish/render directory within your project path.\ +For Karma-specific render, it also outputs the USD render as default. + ## USD (experimental support) ### Publishing USD You can publish your Solaris Stage as USD file. 
diff --git a/website/docs/assets/houdini_render_publish_creator.png b/website/docs/assets/houdini_render_publish_creator.png new file mode 100644 index 0000000000..5dd73d296a Binary files /dev/null and b/website/docs/assets/houdini_render_publish_creator.png differ diff --git a/website/docs/dev_blender.md b/website/docs/dev_blender.md new file mode 100644 index 0000000000..bed0e4a09d --- /dev/null +++ b/website/docs/dev_blender.md @@ -0,0 +1,61 @@ +--- +id: dev_blender +title: Blender integration +sidebar_label: Blender integration +toc_max_heading_level: 4 +--- + +## Run python script at launch +In case you need to execute a python script when Blender is started (aka [`-P`](https://docs.blender.org/manual/en/latest/advanced/command_line/arguments.html#python-options)), for example to programmatically modify a Blender file for conformation, you can create an OpenPype hook as follows: + +```python +from openpype.hosts.blender.hooks import pre_add_run_python_script_arg +from openpype.lib import PreLaunchHook + + +class MyHook(PreLaunchHook): + """Add python script to be executed before Blender launch.""" + + order = pre_add_run_python_script_arg.AddPythonScriptToLaunchArgs.order - 1 + app_groups = [ + "blender", + ] + + def execute(self): + self.launch_context.data.setdefault("python_scripts", []).append( + "/path/to/my_script.py" + ) +``` + +You can write a bare python script, just like one you would run in the [Text Editor](https://docs.blender.org/manual/en/latest/editors/text_editor.html). + +### Python script with arguments +#### Adding arguments +In case you need to pass arguments to your script, you can extend `self.launch_context.data["script_args"]` with them: + +```python +self.launch_context.data.setdefault("script_args", []).extend( + ["--my-arg", "value"] +) +``` + +#### Parsing arguments +You can parse arguments in your script using [argparse](https://docs.python.org/3/library/argparse.html) as follows: + +```python +import argparse +import sys + +parser = argparse.ArgumentParser( + description="Parsing arguments for my_script.py" +) +parser.add_argument( + "--my-arg", + nargs="?", + help="My argument", +) +args, unknown = parser.parse_known_args( + sys.argv[sys.argv.index("--") + 1 :] +) +print(args.my_arg) +``` diff --git a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png index 80e00702e6..76dd9b372a 100644 Binary files a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png and b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png differ diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md index 7bd24a5773..5ddf247d98 100644 --- a/website/docs/project_settings/settings_project_global.md +++ b/website/docs/project_settings/settings_project_global.md @@ -170,12 +170,10 @@ A profile may generate multiple outputs from a single input. Each output must de - **`Letter Box`** - **Enabled** - Enable letter boxes - - **Ratio** - Ratio of letter boxes - - **Type** - **Letterbox** (horizontal bars) or **Pillarbox** (vertical bars) + - **Ratio** - Ratio of letter boxes. The box type is derived from the output image dimensions: if the letterbox ratio is greater than the image ratio, a _letterbox_ is applied, otherwise a _pillarbox_ is rendered.
- **Fill color** - Fill color of boxes (RGBA: 0-255) - **Line Thickness** - Line thickness on the edge of box (set to `0` to turn off) - - **Fill color** - Line color on the edge of box (RGBA: 0-255) - - **Example** + - **Line color** - Line color on the edge of box (RGBA: 0-255) ![global_extract_review_letter_box_settings](assets/global_extract_review_letter_box_settings.png) ![global_extract_review_letter_box](assets/global_extract_review_letter_box.png) diff --git a/website/sidebars.js b/website/sidebars.js index 4874782197..267cc7f6d7 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -180,6 +180,7 @@ module.exports = { ] }, "dev_deadline", + "dev_blender", "dev_colorspace" ] };
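For the Extract Review **Letter Box** settings documented above, the box type is no longer a separate option but is derived from the output dimensions. A minimal sketch of that rule as described, with illustrative parameter names:

```python
# Hypothetical helper mirroring the documented letterbox/pillarbox rule:
# the box type follows from the output image dimensions and the target ratio.
def letter_box_type(output_width, output_height, letter_box_ratio):
    image_ratio = output_width / float(output_height)
    if letter_box_ratio > image_ratio:
        return "letterbox"  # horizontal bars
    return "pillarbox"  # vertical bars


if __name__ == "__main__":
    print(letter_box_type(1920, 1080, 2.35))  # letterbox (2.35 > ~1.78)
    print(letter_box_type(1920, 1080, 1.0))   # pillarbox
```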