Merge branch 'develop' into enhancement/OP-6659_thumbnail-color-managed

Authored by Jakub Ježek on 2023-11-27 16:38:49 +01:00, committed by GitHub
commit 0bd02c15f1
53 changed files with 2143 additions and 621 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.7-nightly.3
- 3.17.7-nightly.2
- 3.17.7-nightly.1
- 3.17.6
@ -134,7 +135,6 @@ body:
- 3.15.2-nightly.6
- 3.15.2-nightly.5
- 3.15.2-nightly.4
- 3.15.2-nightly.3
validations:
required: true
- type: dropdown

View file

@ -152,7 +152,9 @@ def get_output_parameter(node):
return node.parm("ar_ass_file")
elif node_type == "Redshift_Proxy_Output":
return node.parm("RS_archive_file")
elif node_type == "ifd":
if node.evalParm("soho_outputmode"):
return node.parm("soho_diskfile")
raise TypeError("Node type '%s' not supported" % node_type)
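A minimal usage sketch of this helper as it is called downstream (for example by the cache collector further below); the node path is an assumption, not taken from the diff:
import hou
from openpype.hosts.houdini.api import lib

# Resolve the output parameter of a Mantra ROP that writes .ifd files;
# for an "ifd" node with "soho_outputmode" enabled this returns "soho_diskfile".
rop = hou.node("/out/mantra_ifd1")  # assumed node path
parm = lib.get_output_parameter(rop)
print(parm.eval())  # expanded output path, e.g. $HIP/pyblish/<subset>.0001.ifd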

View file

@ -66,10 +66,6 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_event_callback("new", on_new)
self._has_been_setup = True
# add houdini vendor packages
hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor")
sys.path.append(hou_pythonpath)
# Set asset settings for the empty scene directly after launch of
# Houdini so it initializes into the correct scene FPS,

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Arnold ASS files."""
from openpype.hosts.houdini.api import plugin
from openpype.lib import BoolDef
class CreateArnoldAss(plugin.HoudiniCreator):
@ -21,6 +22,9 @@ class CreateArnoldAss(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "arnold"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateArnoldAss, self).create(
subset_name,
@ -52,3 +56,15 @@ class CreateArnoldAss(plugin.HoudiniCreator):
# Lock any parameters in this list
to_lock = ["ar_ass_export_enable", "family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()
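The same "farm" toggle pattern is repeated in the BGEO, Mantra IFD, pointcache, Redshift proxy and VDB creators below. A minimal sketch of the data flow, with illustrative values rather than a real session:
# How the pre-create "farm" toggle ends up on the instance:
pre_create_data = {"farm": True}  # value of the pre-create BoolDef
instance_data = {}
creator_attributes = instance_data.setdefault("creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]

# Later, plugins read the same flag back, e.g. the Deadline cache collector:
farm_enabled = instance_data["creator_attributes"]["farm"]
assert farm_enabled is True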

View file

@ -2,8 +2,8 @@
"""Creator plugin for creating pointcache bgeo files."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
from openpype.lib import EnumDef
import hou
from openpype.lib import EnumDef, BoolDef
class CreateBGEO(plugin.HoudiniCreator):
@ -18,6 +18,9 @@ class CreateBGEO(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateBGEO, self).create(
subset_name,
@ -58,6 +61,13 @@ class CreateBGEO(plugin.HoudiniCreator):
instance_node.setParms(parms)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
bgeo_enum = [
@ -89,7 +99,7 @@ class CreateBGEO(plugin.HoudiniCreator):
return attrs + [
EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
]
] + self.get_instance_attr_defs()
def get_network_categories(self):
return [

View file

@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef
class CreateMantraIFD(plugin.HoudiniCreator):
"""Mantra .ifd Archive"""
identifier = "io.openpype.creators.houdini.mantraifd"
label = "Mantra IFD"
family = "mantraifd"
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
import hou
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateMantraIFD, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
filepath = "{}{}".format(
hou.text.expandString("$HIP/pyblish/"),
"{}.$F4.ifd".format(subset_name))
parms = {
# Render frame range
"trange": 1,
# Mantra ROP settings
"soho_diskfile": filepath,
"soho_outputmode": 1
}
instance_node.setParms(parms)
# Lock any parameters in this list
to_lock = ["soho_outputmode", "family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
from openpype.lib import BoolDef
import hou
class CreatePointCache(plugin.HoudiniCreator):
"""Alembic ROP to pointcache"""
identifier = "io.openpype.creators.houdini.pointcache"
@ -15,6 +17,9 @@ class CreatePointCache(plugin.HoudiniCreator):
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
instance_data.update({"node_type": "alembic"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreatePointCache, self).create(
subset_name,
@ -105,3 +110,15 @@ class CreatePointCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -2,6 +2,7 @@
"""Creator plugin for creating Redshift proxies."""
from openpype.hosts.houdini.api import plugin
import hou
from openpype.lib import BoolDef
class CreateRedshiftProxy(plugin.HoudiniCreator):
@ -24,6 +25,9 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
# TODO: Somehow enforce so that it only shows the original limited
# attributes of the Redshift_Proxy_Output node type
instance_data.update({"node_type": "Redshift_Proxy_Output"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateRedshiftProxy, self).create(
subset_name,
@ -50,3 +54,15 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -2,6 +2,7 @@
"""Creator plugin for creating VDB Caches."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef
import hou
@ -19,15 +20,20 @@ class CreateVDBCache(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateVDBCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
file_path = "{}{}".format(
hou.text.expandString("$HIP/pyblish/"),
"{}.$F4.vdb".format(subset_name))
parms = {
"sopoutput": "$HIP/pyblish/{}.$F4.vdb".format(subset_name),
"sopoutput": file_path,
"initsim": True,
"trange": 1
}
@ -103,3 +109,15 @@ class CreateVDBCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -0,0 +1,75 @@
import os
import pyblish.api
import hou
from openpype.hosts.houdini.api import lib
class CollectDataforCache(pyblish.api.InstancePlugin):
"""Collect data for caching to Deadline."""
order = pyblish.api.CollectorOrder + 0.04
families = ["ass", "pointcache",
"mantraifd", "redshiftproxy",
"vdbcache"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect Data for Cache"
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
farm_enabled = creator_attribute["farm"]
instance.data["farm"] = farm_enabled
if not farm_enabled:
self.log.debug("Caching on farm is disabled. "
"Skipping farm collecting.")
return
# Why do we need this particular collector to collect the expected
# output files from a ROP node? Don't we have a dedicated collector
# for that yet?
# Collect expected files
ropnode = hou.node(instance.data["instance_node"])
output_parm = lib.get_output_parameter(ropnode)
expected_filepath = output_parm.eval()
instance.data.setdefault("files", list())
instance.data.setdefault("expectedFiles", list())
if instance.data.get("frames"):
files = self.get_files(instance, expected_filepath)
# list of files
instance.data["files"].extend(files)
else:
# single file
instance.data["files"].append(output_parm.eval())
cache_files = {"_": instance.data["files"]}
# Convert instance family to pointcache if it is bgeo or abc
# because ???
for family in instance.data["families"]:
if family == "bgeo" or "abc":
instance.data["family"] = "pointcache"
break
instance.data.update({
"plugin": "Houdini",
"publish": True
})
instance.data["families"].append("publish.hou")
instance.data["expectedFiles"].append(cache_files)
self.log.debug("{}".format(instance.data))
def get_files(self, instance, output_parm):
"""Get the files with the frame range data
Args:
instance (_type_): instance
output_parm (_type_): path of output parameter
Returns:
files: a list of files
"""
directory = os.path.dirname(output_parm)
files = [
os.path.join(directory, frame).replace("\\", "/")
for frame in instance.data["frames"]
]
return files
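A minimal sketch of the data this collector leaves on the instance for a two-frame bgeo cache; the paths, subset name and frame numbers are assumptions:
# Illustrative end state of instance.data after this collector runs:
collected = {
    "farm": True,
    "files": [
        "/proj/work/pyblish/pointcacheMain.1001.bgeo",
        "/proj/work/pyblish/pointcacheMain.1002.bgeo",
    ],
    "expectedFiles": [
        {"_": [
            "/proj/work/pyblish/pointcacheMain.1001.bgeo",
            "/proj/work/pyblish/pointcacheMain.1002.bgeo",
        ]},
    ],
    "family": "pointcache",   # bgeo/abc families are converted to pointcache
    "families": ["bgeo", "publish.hou"],
    "plugin": "Houdini",
    "publish": True,
}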

View file

@ -0,0 +1,39 @@
import pyblish.api
from openpype.lib import NumberDef
from openpype.pipeline import OpenPypePyblishPluginMixin
class CollectChunkSize(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Collect chunk size for cache submission to Deadline."""
order = pyblish.api.CollectorOrder + 0.05
families = ["ass", "pointcache",
"vdbcache", "mantraifd",
"redshiftproxy"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect Chunk Size"
chunkSize = 999999
def process(self, instance):
# need to get the chunk size info from the setting
attr_values = self.get_attr_values_from_data(instance.data)
instance.data["chunkSize"] = attr_values.get("chunkSize")
@classmethod
def apply_settings(cls, project_settings):
project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"] # noqa
cls.chunkSize = project_setting["chunk_size"]
@classmethod
def get_attribute_defs(cls):
return [
NumberDef("chunkSize",
minimum=1,
maximum=999999,
decimals=0,
default=cls.chunkSize,
label="Frame Per Task")
]
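The chunk size default is driven by project settings. A sketch of the settings shape that apply_settings() expects; the key path matches the code above and the value mirrors the class default:
# Assumed settings payload consumed by apply_settings():
project_settings = {
    "houdini": {
        "publish": {
            "CollectChunkSize": {
                "chunk_size": 999999,
            },
        },
    },
}
assert project_settings["houdini"]["publish"]["CollectChunkSize"]["chunk_size"] == 999999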

View file

@ -16,7 +16,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"redshiftproxy", "review", "bgeo"]
"mantraifd", "redshiftproxy", "review",
"bgeo"]
def process(self, instance):

View file

@ -14,8 +14,12 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Alembic"
hosts = ["houdini"]
families = ["abc", "camera"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])

View file

@ -14,9 +14,12 @@ class ExtractAss(publish.Extractor):
label = "Extract Ass"
families = ["ass"]
hosts = ["houdini"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -17,7 +17,9 @@ class ExtractBGEO(publish.Extractor):
families = ["bgeo"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -0,0 +1,51 @@
import os
import pyblish.api
from openpype.pipeline import publish
import hou
class ExtractMantraIFD(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Mantra ifd"
hosts = ["houdini"]
families = ["mantraifd"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data.get("instance_node"))
output = ropnode.evalParm("soho_diskfile")
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
files = instance.data["frames"]
missing_frames = [
frame
for frame in instance.data["frames"]
if not os.path.exists(
os.path.normpath(os.path.join(staging_dir, frame)))
]
if missing_frames:
raise RuntimeError("Failed to complete Mantra ifd extraction. "
"Missing output files: {}".format(
missing_frames))
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ifd',
'ext': 'ifd',
'files': files,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
}
instance.data["representations"].append(representation)

View file

@ -14,9 +14,12 @@ class ExtractRedshiftProxy(publish.Extractor):
label = "Extract Redshift Proxy"
families = ["redshiftproxy"]
hosts = ["houdini"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data.get("instance_node"))
# Get the filename from the filename parameter

View file

@ -16,7 +16,9 @@ class ExtractVDBCache(publish.Extractor):
hosts = ["houdini"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -22,7 +22,8 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
"arnold_rop",
"mantra_rop",
"karma_rop",
"usdrender"]
"usdrender",
"publish.hou"]
optional = True
def process(self, context):

View file

@ -20,7 +20,7 @@ class ValidateHoudiniNotApprenticeLicense(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
families = ["usd", "abc"]
families = ["usd", "abc", "fbx", "camera"]
hosts = ["houdini"]
label = "Houdini Apprentice License"

View file

@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
def main():
print("Installing OpenPype ...")
install_host(HoudiniHost())
main()

View file

@ -1 +0,0 @@
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

View file

@ -1,152 +0,0 @@
import os
import hou
import husdoutputprocessors.base as base
import colorbleed.usdlib as usdlib
from openpype.client import get_asset_by_name
from openpype.pipeline import Anatomy, get_current_project_name
class AvalonURIOutputProcessor(base.OutputProcessorBase):
"""Process Avalon URIs into their full path equivalents.
"""
_parameters = None
_param_prefix = 'avalonurioutputprocessor_'
_parms = {
"use_publish_paths": _param_prefix + "use_publish_paths"
}
def __init__(self):
""" There is only one object of each output processor class that is
ever created in a Houdini session. Therefore be very careful
about what data gets put in this object.
"""
self._use_publish_paths = False
self._cache = dict()
def displayName(self):
return 'Avalon URI Output Processor'
def parameters(self):
if not self._parameters:
parameters = hou.ParmTemplateGroup()
use_publish_path = hou.ToggleParmTemplate(
name=self._parms["use_publish_paths"],
label='Resolve Reference paths to publish paths',
default_value=False,
help=("When enabled any paths for Layers, References or "
"Payloads are resolved to published master versions.\n"
"This is usually only used by the publishing pipeline, "
"but can be used for testing too."))
parameters.append(use_publish_path)
self._parameters = parameters.asDialogScript()
return self._parameters
def beginSave(self, config_node, t):
parm = self._parms["use_publish_paths"]
self._use_publish_paths = config_node.parm(parm).evalAtTime(t)
self._cache.clear()
def endSave(self):
self._use_publish_paths = None
self._cache.clear()
def processAsset(self,
asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Retrieve from cache if this query occurred before (optimization)
cache_key = (asset_path, asset_path_for_save, asset_is_layer, for_save)
if cache_key in self._cache:
return self._cache[cache_key]
relative_template = "{asset}_{subset}.{ext}"
uri_data = usdlib.parse_avalon_uri(asset_path)
if uri_data:
if for_save:
# Set save output path to a relative path so other
# processors can potentially manage it easily?
path = relative_template.format(**uri_data)
print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
self._cache[cache_key] = path
return path
if self._use_publish_paths:
# Resolve to an Avalon published asset for embedded paths
path = self._get_usd_master_path(**uri_data)
else:
path = relative_template.format(**uri_data)
print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
self._cache[cache_key] = path
return path
self._cache[cache_key] = asset_path
return asset_path
def _get_usd_master_path(self,
asset,
subset,
ext):
"""Get the filepath for a .usd file of a subset.
This will return the path to an unversioned master file generated by
`usd_master_file.py`.
"""
PROJECT = get_current_project_name()
anatomy = Anatomy(PROJECT)
asset_doc = get_asset_by_name(PROJECT, asset)
if not asset_doc:
raise RuntimeError("Invalid asset name: '%s'" % asset)
template_obj = anatomy.templates_obj["publish"]["path"]
path = template_obj.format_strict({
"project": PROJECT,
"asset": asset_doc["name"],
"subset": subset,
"representation": ext,
"version": 0 # stub version zero
})
# Remove the version folder
subset_folder = os.path.dirname(os.path.dirname(path))
master_folder = os.path.join(subset_folder, "master")
fname = "{0}.{1}".format(subset, ext)
return os.path.join(master_folder, fname).replace("\\", "/")
output_processor = AvalonURIOutputProcessor()
def usdOutputProcessor():
return output_processor

View file

@ -1,90 +0,0 @@
import hou
import husdoutputprocessors.base as base
import os
class StagingDirOutputProcessor(base.OutputProcessorBase):
"""Output all USD Rop file nodes into the Staging Directory
Ignore any folders and paths set in the Configured Layers
and USD Rop node, just take the filename and save into a
single directory.
"""
theParameters = None
parameter_prefix = "stagingdiroutputprocessor_"
stagingdir_parm_name = parameter_prefix + "stagingDir"
def __init__(self):
self.staging_dir = None
def displayName(self):
return 'StagingDir Output Processor'
def parameters(self):
if not self.theParameters:
parameters = hou.ParmTemplateGroup()
rootdirparm = hou.StringParmTemplate(
self.stagingdir_parm_name,
'Staging Directory', 1,
string_type=hou.stringParmType.FileReference,
file_type=hou.fileType.Directory
)
parameters.append(rootdirparm)
self.theParameters = parameters.asDialogScript()
return self.theParameters
def beginSave(self, config_node, t):
# Use the Root Directory parameter if it is set.
root_dir_parm = config_node.parm(self.stagingdir_parm_name)
if root_dir_parm:
self.staging_dir = root_dir_parm.evalAtTime(t)
if not self.staging_dir:
out_file_parm = config_node.parm('lopoutput')
if out_file_parm:
self.staging_dir = out_file_parm.evalAtTime(t)
if self.staging_dir:
(self.staging_dir, filename) = os.path.split(self.staging_dir)
def endSave(self):
self.staging_dir = None
def processAsset(self, asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Treat save paths as being relative to the output path.
if for_save and self.staging_dir:
# Whenever we're processing a Save Path make sure to
# resolve it to the Staging Directory
filename = os.path.basename(asset_path)
return os.path.join(self.staging_dir, filename)
return asset_path
output_processor = StagingDirOutputProcessor()
def usdOutputProcessor():
return output_processor

View file

@ -102,8 +102,6 @@ _alembic_options = {
INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]
DISPLAY_LIGHTS_ENUM = [
{"label": "Use Project Settings", "value": "project_settings"},
@ -3021,194 +3019,6 @@ class shelf():
cmds.shelfLayout(self.name, p="ShelfLayout")
def _get_render_instances():
"""Return all 'render-like' instances.
This returns list of instance sets that needs to receive information
about render layer changes.
Returns:
list: list of instances
"""
objectset = cmds.ls("*.id", long=True, exactType="objectSet",
recursive=True, objectsOnly=True)
instances = []
for objset in objectset:
if not cmds.attributeQuery("id", node=objset, exists=True):
continue
id_attr = "{}.id".format(objset)
if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
continue
has_family = cmds.attributeQuery("family",
node=objset,
exists=True)
if not has_family:
continue
if cmds.getAttr(
"{}.family".format(objset)) in RENDERLIKE_INSTANCE_FAMILIES:
instances.append(objset)
return instances
renderItemObserverList = []
class RenderSetupListObserver:
"""Observer to catch changes in render setup layers."""
def listItemAdded(self, item):
print("--- adding ...")
self._add_render_layer(item)
def listItemRemoved(self, item):
print("--- removing ...")
self._remove_render_layer(item.name())
def _add_render_layer(self, item):
render_sets = _get_render_instances()
layer_name = item.name()
for render_set in render_sets:
members = cmds.sets(render_set, query=True) or []
namespace_name = "_{}".format(render_set)
if not cmds.namespace(exists=namespace_name):
index = 1
namespace_name = "_{}".format(render_set)
try:
cmds.namespace(rm=namespace_name)
except RuntimeError:
# namespace is not empty, so we leave it untouched
pass
orignal_namespace_name = namespace_name
while(cmds.namespace(exists=namespace_name)):
namespace_name = "{}{}".format(
orignal_namespace_name, index)
index += 1
namespace = cmds.namespace(add=namespace_name)
if members:
# if set already have namespaced members, use the same
# namespace as others.
namespace = members[0].rpartition(":")[0]
else:
namespace = namespace_name
render_layer_set_name = "{}:{}".format(namespace, layer_name)
if render_layer_set_name in members:
continue
print(" - creating set for {}".format(layer_name))
maya_set = cmds.sets(n=render_layer_set_name, empty=True)
cmds.sets(maya_set, forceElement=render_set)
rio = RenderSetupItemObserver(item)
print("- adding observer for {}".format(item.name()))
item.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def _remove_render_layer(self, layer_name):
render_sets = _get_render_instances()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
render_layer_set_name = "{}:{}".format(namespace, layer_name)
if render_layer_set_name in members:
print(" - removing set for {}".format(layer_name))
cmds.delete(render_layer_set_name)
class RenderSetupItemObserver:
"""Handle changes in render setup items."""
def __init__(self, item):
self.item = item
self.original_name = item.name()
def itemChanged(self, *args, **kwargs):
"""Item changed callback."""
if self.item.name() == self.original_name:
return
render_sets = _get_render_instances()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
render_layer_set_name = "{}:{}".format(
namespace, self.original_name)
if render_layer_set_name in members:
print(" <> renaming {} to {}".format(self.original_name,
self.item.name()))
cmds.rename(render_layer_set_name,
"{}:{}".format(
namespace, self.item.name()))
self.original_name = self.item.name()
renderListObserver = RenderSetupListObserver()
def add_render_layer_change_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
rs = renderSetup.instance()
render_sets = _get_render_instances()
layers = rs.getRenderLayers()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
for layer in layers:
render_layer_set_name = "{}:{}".format(namespace, layer.name())
if render_layer_set_name not in members:
continue
rio = RenderSetupItemObserver(layer)
print("- adding observer for {}".format(layer.name()))
layer.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def add_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("> adding renderSetup observer ...")
rs = renderSetup.instance()
rs.addListObserver(renderListObserver)
pass
def remove_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("< removing renderSetup observer ...")
rs = renderSetup.instance()
try:
rs.removeListObserver(renderListObserver)
except ValueError:
# no observer set yet
pass
def update_content_on_context_change():
"""
This will update scene content to match new asset on context change

View file

@ -580,20 +580,11 @@ def on_save():
lib.set_id(node, new_id, overwrite=False)
def _update_render_layer_observers():
# Helper to trigger update for all renderlayer observer logic
lib.remove_render_layer_observer()
lib.add_render_layer_observer()
lib.add_render_layer_change_observer()
def on_open():
"""On scene open let's assume the containers have changed."""
from openpype.widgets import popup
utils.executeDeferred(_update_render_layer_observers)
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
lib.validate_fps()
@ -630,7 +621,6 @@ def on_new():
with lib.suspended_refresh():
lib.set_context_settings()
utils.executeDeferred(_update_render_layer_observers)
_remove_workfile_lock()

View file

@ -50,11 +50,11 @@ class ExtractRedshiftProxy(publish.Extractor):
# Padding is taken from number of digits of the end_frame.
# Not sure where Redshift is taking it.
repr_files = [
"{}.{}{}".format(root, str(frame).rjust(4, "0"), ext) # noqa: E501
"{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501
for frame in range(
int(start_frame),
int(end_frame) + 1,
int(instance.data["step"]),
int(instance.data["step"])
)]
# vertex_colors = instance.data.get("vertexColors", False)
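A worked example of the change above with assumed values (root "/proj/cache/boxRS", extension ".rs", frames 1-3, step 1); the representation now lists bare file names rather than absolute paths:
import os

# Assumed inputs mirroring the variables used in the plugin:
root, ext = "/proj/cache/boxRS", ".rs"
start_frame, end_frame, step = 1, 3, 1
repr_files = [
    "{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext)
    for frame in range(int(start_frame), int(end_frame) + 1, int(step))
]
assert repr_files == ["boxRS.0001.rs", "boxRS.0002.rs", "boxRS.0003.rs"]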

View file

@ -111,7 +111,6 @@ class ValidateNukeWriteNode(
for value in values:
if type(node_value) in (int, float):
try:
if isinstance(value, list):
value = color_gui_to_int(value)
else:
@ -130,7 +129,7 @@ class ValidateNukeWriteNode(
and key != "file"
and key != "tile_color"
):
check.append([key, value, write_node[key].value()])
check.append([key, node_value, write_node[key].value()])
if check:
self._make_error(check)

View file

@ -0,0 +1,8 @@
from .module import (
AssetReporterAction
)
__all__ = (
"AssetReporterAction",
)

View file

@ -0,0 +1,27 @@
import os.path
from openpype import AYON_SERVER_ENABLED
from openpype.modules import OpenPypeModule, ITrayAction
from openpype.lib import run_detached_process, get_openpype_execute_args
class AssetReporterAction(OpenPypeModule, ITrayAction):
label = "Asset Usage Report"
name = "asset_reporter"
def tray_init(self):
pass
def initialize(self, modules_settings):
self.enabled = not AYON_SERVER_ENABLED
def on_action_trigger(self):
args = get_openpype_execute_args()
args += ["run",
os.path.join(
os.path.dirname(__file__),
"window.py")]
print(" ".join(args))
run_detached_process(args)

View file

@ -0,0 +1,418 @@
"""Tool for generating asset usage report.
This tool is used to generate an asset usage report for a project.
It uses links between published versions to find out where
an asset is used.
"""
import csv
import time
import appdirs
import qtawesome
from pymongo.collection import Collection
from qtpy import QtCore, QtWidgets
from qtpy.QtGui import QClipboard, QColor
from openpype import style
from openpype.client import OpenPypeMongoConnection
from openpype.lib import JSONSettingRegistry
from openpype.tools.utils import PlaceholderLineEdit, get_openpype_qt_app
from openpype.tools.utils.constants import PROJECT_NAME_ROLE
from openpype.tools.utils.models import ProjectModel, ProjectSortFilterProxy
class AssetReporterRegistry(JSONSettingRegistry):
"""Class handling OpenPype general settings registry.
This is used to store last selected project.
Attributes:
vendor (str): Name used for path construction.
product (str): Additional name used for path construction.
"""
def __init__(self):
self.vendor = "ynput"
self.product = "openpype"
name = "asset_usage_reporter"
path = appdirs.user_data_dir(self.product, self.vendor)
super(AssetReporterRegistry, self).__init__(name, path)
class OverlayWidget(QtWidgets.QFrame):
"""Overlay widget for choosing project.
This code is taken from the Tray Publisher tool.
"""
project_selected = QtCore.Signal(str)
def __init__(self, publisher_window):
super(OverlayWidget, self).__init__(publisher_window)
self.setObjectName("OverlayFrame")
middle_frame = QtWidgets.QFrame(self)
middle_frame.setObjectName("ChooseProjectFrame")
content_widget = QtWidgets.QWidget(middle_frame)
header_label = QtWidgets.QLabel("Choose project", content_widget)
header_label.setObjectName("ChooseProjectLabel")
# Create project models and view
projects_model = ProjectModel()
projects_proxy = ProjectSortFilterProxy()
projects_proxy.setSourceModel(projects_model)
projects_proxy.setFilterKeyColumn(0)
projects_view = QtWidgets.QListView(content_widget)
projects_view.setObjectName("ChooseProjectView")
projects_view.setModel(projects_proxy)
projects_view.setEditTriggers(
QtWidgets.QAbstractItemView.NoEditTriggers
)
confirm_btn = QtWidgets.QPushButton("Confirm", content_widget)
cancel_btn = QtWidgets.QPushButton("Cancel", content_widget)
cancel_btn.setVisible(False)
btns_layout = QtWidgets.QHBoxLayout()
btns_layout.addStretch(1)
btns_layout.addWidget(cancel_btn, 0)
btns_layout.addWidget(confirm_btn, 0)
txt_filter = PlaceholderLineEdit(content_widget)
txt_filter.setPlaceholderText("Quick filter projects..")
txt_filter.setClearButtonEnabled(True)
txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"),
QtWidgets.QLineEdit.LeadingPosition)
content_layout = QtWidgets.QVBoxLayout(content_widget)
content_layout.setContentsMargins(0, 0, 0, 0)
content_layout.setSpacing(20)
content_layout.addWidget(header_label, 0)
content_layout.addWidget(txt_filter, 0)
content_layout.addWidget(projects_view, 1)
content_layout.addLayout(btns_layout, 0)
middle_layout = QtWidgets.QHBoxLayout(middle_frame)
middle_layout.setContentsMargins(30, 30, 10, 10)
middle_layout.addWidget(content_widget)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(10, 10, 10, 10)
main_layout.addStretch(1)
main_layout.addWidget(middle_frame, 2)
main_layout.addStretch(1)
projects_view.doubleClicked.connect(self._on_double_click)
confirm_btn.clicked.connect(self._on_confirm_click)
cancel_btn.clicked.connect(self._on_cancel_click)
txt_filter.textChanged.connect(self._on_text_changed)
self._projects_view = projects_view
self._projects_model = projects_model
self._projects_proxy = projects_proxy
self._cancel_btn = cancel_btn
self._confirm_btn = confirm_btn
self._txt_filter = txt_filter
self._publisher_window = publisher_window
self._project_name = None
def showEvent(self, event):
self._projects_model.refresh()
# Sort projects after refresh
self._projects_proxy.sort(0)
setting_registry = AssetReporterRegistry()
try:
project_name = str(setting_registry.get_item("project_name"))
except ValueError:
project_name = None
if project_name:
index = None
src_index = self._projects_model.find_project(project_name)
if src_index is not None:
index = self._projects_proxy.mapFromSource(src_index)
if index is not None:
selection_model = self._projects_view.selectionModel()
selection_model.select(
index,
QtCore.QItemSelectionModel.SelectCurrent
)
self._projects_view.setCurrentIndex(index)
self._cancel_btn.setVisible(self._project_name is not None)
super(OverlayWidget, self).showEvent(event)
def _on_double_click(self):
self.set_selected_project()
def _on_confirm_click(self):
self.set_selected_project()
def _on_cancel_click(self):
self._set_project(self._project_name)
def _on_text_changed(self):
self._projects_proxy.setFilterRegularExpression(
self._txt_filter.text())
def set_selected_project(self):
index = self._projects_view.currentIndex()
if project_name := index.data(PROJECT_NAME_ROLE):
self._set_project(project_name)
def _set_project(self, project_name):
self._project_name = project_name
self.setVisible(False)
self.project_selected.emit(project_name)
setting_registry = AssetReporterRegistry()
setting_registry.set_item("project_name", project_name)
class AssetReporterWindow(QtWidgets.QDialog):
default_width = 1000
default_height = 800
_content = None
def __init__(self, parent=None, controller=None, reset_on_show=None):
super(AssetReporterWindow, self).__init__(parent)
self._result = {}
self.setObjectName("AssetReporterWindow")
self.setWindowTitle("Asset Usage Reporter")
if parent is None:
on_top_flag = QtCore.Qt.WindowStaysOnTopHint
else:
on_top_flag = QtCore.Qt.Dialog
self.setWindowFlags(
QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMaximizeButtonHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
| on_top_flag
)
self.table = QtWidgets.QTableWidget(self)
self.table.setColumnCount(3)
self.table.setColumnWidth(0, 400)
self.table.setColumnWidth(1, 300)
self.table.setHorizontalHeaderLabels(["Subset", "Used in", "Version"])
# self.text_area = QtWidgets.QTextEdit(self)
self.copy_button = QtWidgets.QPushButton('Copy to Clipboard', self)
self.save_button = QtWidgets.QPushButton('Save to CSV File', self)
self.copy_button.clicked.connect(self.copy_to_clipboard)
self.save_button.clicked.connect(self.save_to_file)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.table)
# layout.addWidget(self.text_area)
layout.addWidget(self.copy_button)
layout.addWidget(self.save_button)
self.resize(self.default_width, self.default_height)
self.setStyleSheet(style.load_stylesheet())
overlay_widget = OverlayWidget(self)
overlay_widget.project_selected.connect(self._on_project_select)
self._overlay_widget = overlay_widget
def _on_project_select(self, project_name: str):
"""Generate table when project is selected.
This will generate the table and fill it with data.
Source data is held in memory in the `_result` attribute and
is used to produce the clipboard content or the CSV file.
"""
self._project_name = project_name
self.process()
if not self._result:
self.set_content("no result generated")
return
rows = sum(len(value) for key, value in self._result.items())
self.table.setRowCount(rows)
row = 0
content = []
for key, value in self._result.items():
item = QtWidgets.QTableWidgetItem(key)
# this doesn't work as it is probably overridden by the stylesheet?
# item.setBackground(QColor(32, 32, 32))
self.table.setItem(row, 0, item)
for source in value:
self.table.setItem(
row, 1, QtWidgets.QTableWidgetItem(source["name"]))
self.table.setItem(
row, 2, QtWidgets.QTableWidgetItem(
str(source["version"])))
row += 1
# generate clipboard content
content.append(key)
content.extend(
f"\t{source['name']} (v{source['version']})" for source in value # noqa: E501
)
self.set_content("\n".join(content))
def copy_to_clipboard(self):
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(self._content, QClipboard.Clipboard)
def save_to_file(self):
file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File')
if file_name:
self._write_csv(file_name)
def set_content(self, content):
self._content = content
def get_content(self):
return self._content
def _resize_overlay(self):
self._overlay_widget.resize(
self.width(),
self.height()
)
def resizeEvent(self, event):
super(AssetReporterWindow, self).resizeEvent(event)
self._resize_overlay()
def _get_subset(self, version_id, project: Collection):
pipeline = [
{
"$match": {
"_id": version_id
},
}, {
"$lookup": {
"from": project.name,
"localField": "parent",
"foreignField": "_id",
"as": "parents"
}
}
]
result = project.aggregate(pipeline)
doc = next(result)
# print(doc)
return {
"name": f'{"/".join(doc["parents"][0]["data"]["parents"])}/{doc["parents"][0]["name"]}/{doc["name"]}', # noqa: E501
"family": doc["data"].get("family") or doc["data"].get("families")[0] # noqa: E501
}
def process(self):
"""Generate asset usage report data.
This is the main method of the tool. It uses a MongoDB
aggregation pipeline to find all published versions that
are used as input for other published versions. Then it
generates a map of assets and their usage.
"""
start = time.perf_counter()
project = self._project_name
# get all versions of published workfiles that have non-empty
# inputLinks and connect them with their respective documents
# using their IDs.
pipeline = [
{
"$match": {
"data.inputLinks": {
"$exists": True,
"$ne": []
},
"data.families": {"$in": ["workfile"]}
}
}, {
"$lookup": {
"from": project,
"localField": "data.inputLinks.id",
"foreignField": "_id",
"as": "linked_docs"
}
}
]
client = OpenPypeMongoConnection.get_mongo_client()
db = client["avalon"]
result = db[project].aggregate(pipeline)
asset_map = []
# this is creating the map - for every workfile and its linked
# documents, create a dictionary with "source" and "refs" keys
# and resolve the subset name and version from the document
for doc in result:
source = {
"source": self._get_subset(doc["parent"], db[project]),
}
source["source"].update({"version": doc["name"]})
refs = []
version = '<unknown>'
for linked in doc["linked_docs"]:
try:
version = f'v{linked["name"]}'
except KeyError:
if linked["type"] == "hero_version":
version = "hero"
finally:
refs.append({
"subset": self._get_subset(
linked["parent"], db[project]),
"version": version
})
source["refs"] = refs
asset_map.append(source)
grouped = {}
# this will group the assets by subset name and version
for asset in asset_map:
for ref in asset["refs"]:
key = f'{ref["subset"]["name"]} ({ref["version"]})'
if key in grouped:
grouped[key].append(asset["source"])
else:
grouped[key] = [asset["source"]]
self._result = grouped
end = time.perf_counter()
print(f"Finished in {end - start:0.4f} seconds", 2)
def _write_csv(self, file_name: str) -> None:
"""Write CSV file with results."""
with open(file_name, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=";")
writer.writerow(["Subset", "Used in", "Version"])
for key, value in self._result.items():
writer.writerow([key, "", ""])
for source in value:
writer.writerow(["", source["name"], source["version"]])
def main():
app_instance = get_openpype_qt_app()
window = AssetReporterWindow()
window.show()
app_instance.exec_()
if __name__ == "__main__":
main()
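For readers unfamiliar with the data model, a simplified sketch of the version document shape that the aggregation in process() relies on; field names follow the pipeline above, the values are placeholders:
# Simplified, assumed shape of a matched workfile version document; only the
# fields referenced by the $match/$lookup stages are shown:
workfile_version_doc = {
    "_id": "<version id>",
    "name": 12,                      # workfile version number
    "parent": "<subset id>",         # resolved to a subset by _get_subset()
    "data": {
        "families": ["workfile"],
        "inputLinks": [
            {"id": "<linked version id>"},   # joined via "data.inputLinks.id"
        ],
    },
}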

View file

@ -0,0 +1,190 @@
import os
import getpass
from datetime import datetime
import hou
import attr
import pyblish.api
from openpype.lib import (
TextDef,
NumberDef,
)
from openpype.pipeline import (
legacy_io,
OpenPypePyblishPluginMixin
)
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class HoudiniPluginInfo(object):
Build = attr.ib(default=None)
IgnoreInputs = attr.ib(default=True)
ScriptJob = attr.ib(default=True)
SceneFile = attr.ib(default=None) # Input
SaveFile = attr.ib(default=True)
ScriptFilename = attr.ib(default=None)
OutputDriver = attr.ib(default=None)
Version = attr.ib(default=None) # Mandatory for Deadline
ProjectPath = attr.ib(default=None)
class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, # noqa
OpenPypePyblishPluginMixin):
"""Submit Houdini scene to perform a local publish in Deadline.
Publishing in Deadline can be helpful for scenes that publish very slowly.
This way it can process in the background on another machine without the
artist having to wait for the publish to finish on their local machine.
Submission is done through the Deadline Web Service as
supplied via the environment variable AVALON_DEADLINE.
"""
label = "Submit Scene to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["houdini"]
families = ["publish.hou"]
targets = ["local"]
priority = 50
jobInfo = {}
pluginInfo = {}
group = None
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="Houdini")
job_info.update(self.jobInfo)
instance = self._instance
context = instance.context
assert all(
result["success"] for result in context.data["results"]
), "Errors found, aborting integration.."
# Deadline connection
AVALON_DEADLINE = legacy_io.Session.get(
"AVALON_DEADLINE", "http://localhost:8082"
)
assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
project_name = instance.context.data["projectName"]
filepath = context.data["currentFile"]
scenename = os.path.basename(filepath)
job_name = "{scene} - {instance} [PUBLISH]".format(
scene=scenename, instance=instance.name)
batch_name = "{code} - {scene}".format(code=project_name,
scene=scenename)
if is_in_tests():
batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
job_info.Name = job_name
job_info.BatchName = batch_name
job_info.Plugin = instance.data["plugin"]
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
rop_node = self.get_rop_node(instance)
if rop_node.type().name() != "alembic":
frames = "{start}-{end}x{step}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
)
job_info.Frames = frames
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
attr_values = self.get_attr_values_from_data(instance.data)
job_info.ChunkSize = instance.data["chunkSize"]
job_info.Comment = context.data.get("comment")
job_info.Priority = attr_values.get("priority", self.priority)
job_info.Group = attr_values.get("group", self.group)
keys = [
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"OPENPYPE_SG_USER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS",
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
for key in keys:
value = environment.get(key)
if not value:
continue
job_info.EnvironmentKeyValue[key] = value
# to recognize render jobs
job_info.add_render_job_env_var()
return job_info
def get_plugin_info(self):
instance = self._instance
version = hou.applicationVersionString()
version = ".".join(version.split(".")[:2])
rop = self.get_rop_node(instance)
plugin_info = HoudiniPluginInfo(
Build=None,
IgnoreInputs=True,
ScriptJob=True,
SceneFile=self.scene_path,
SaveFile=True,
OutputDriver=rop.path(),
Version=version,
ProjectPath=os.path.dirname(self.scene_path)
)
plugin_payload = attr.asdict(plugin_info)
return plugin_payload
def process(self, instance):
super(HoudiniCacheSubmitDeadline, self).process(instance)
output_dir = os.path.dirname(instance.data["files"][0])
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
def get_rop_node(self, instance):
rop = instance.data.get("instance_node")
rop_node = hou.node(rop)
return rop_node
@classmethod
def get_attribute_defs(cls):
defs = super(HoudiniCacheSubmitDeadline, cls).get_attribute_defs()
defs.extend([
NumberDef("priority",
minimum=1,
maximum=250,
decimals=0,
default=cls.priority,
label="Priority"),
TextDef("group",
default=cls.group,
label="Group Name"),
])
return defs
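A small worked example of the Frames range string built in get_job_info() above, using assumed frame values:
# Assumed frame data; the format matches the submission code above:
frames = "{start}-{end}x{step}".format(start=1001, end=1050, step=1)
assert frames == "1001-1050x1"   # Deadline range syntax: start-end x step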

View file

@ -97,7 +97,6 @@ class VRayPluginInfo(object):
@attr.s
class ArnoldPluginInfo(object):
ArnoldFile = attr.ib(default=None)
ArnoldVerbose = attr.ib(default=2)
class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
@ -661,12 +660,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# Plugin Info
ass_file, _ = os.path.splitext(data["output_filename_0"])
ass_filepath = ass_file + ".ass"
current_verbosity_level = cmds.getAttr(
"defaultArnoldRenderOptions.log_verbosity")
plugin_info = ArnoldPluginInfo(
ArnoldFile=ass_filepath,
ArnoldVerbose=current_verbosity_level
ArnoldFile=ass_filepath
)
return job_info, attr.asdict(plugin_info)

View file

@ -0,0 +1,506 @@
# -*- coding: utf-8 -*-
"""Submit publishing job to farm."""
import os
import json
import re
from copy import deepcopy
import requests
import pyblish.api
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_last_version_by_subset_name,
)
from openpype.pipeline import publish, legacy_io
from openpype.lib import EnumDef, is_running_from_build
from openpype.tests.lib import is_in_tests
from openpype.pipeline.version_start import get_versioning_start
from openpype.pipeline.farm.pyblish_functions import (
create_skeleton_instance_cache,
create_instances_for_cache,
attach_instances_to_subset,
prepare_cache_representations,
create_metadata_path
)
class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
publish.OpenPypePyblishPluginMixin,
publish.ColormanagedPyblishPluginMixin):
"""Process Cache Job submitted on farm
This is replicated version of submit publish job
specifically for cache(s).
These jobs are dependent on a deadline job
submission prior to this plug-in.
- In case of Deadline, it creates dependent job on farm publishing
rendered image sequence.
Options in instance.data:
- deadlineSubmissionJob (dict, Required): The returned .json
data from the job submission to deadline.
- outputDir (str, Required): The output directory where the metadata
file should be generated. It's assumed that this will also be
final folder containing the output files.
- ext (str, Optional): The extension (including `.`) that is required
in the output filename to be picked up for image sequence
publishing.
- expectedFiles (list or dict): explained below
"""
label = "Submit cache jobs to Deadline"
order = pyblish.api.IntegratorOrder + 0.2
icon = "tractor"
targets = ["local"]
hosts = ["houdini"]
families = ["publish.hou"]
environ_job_filter = [
"OPENPYPE_METADATA_FILE"
]
environ_keys = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_SG_USER",
"KITSU_LOGIN",
"KITSU_PWD"
]
# custom deadline attributes
deadline_department = ""
deadline_pool = ""
deadline_pool_secondary = ""
deadline_group = ""
deadline_chunk_size = 1
deadline_priority = None
# regex for finding frame number in string
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
plugin_pype_version = "3.0"
# script path for publish_filesequence.py
publishing_script = None
def _submit_deadline_post_job(self, instance, job):
"""Submit publish job to Deadline.
Deadline specific code separated from :meth:`process` for sake of
more universal code. Muster post job is sent directly by Muster
submitter, so this type of code isn't necessary for it.
Returns:
(str): deadline_publish_job_id
"""
data = instance.data.copy()
subset = data["subset"]
job_name = "Publish - {subset}".format(subset=subset)
anatomy = instance.context.data['anatomy']
# instance.data.get("subset") != instances[0]["subset"]
# 'Main' vs 'renderMain'
override_version = None
instance_version = instance.data.get("version") # take this if exists
if instance_version != 1:
override_version = instance_version
output_dir = self._get_publish_folder(
anatomy,
deepcopy(instance.data["anatomyData"]),
instance.data.get("asset"),
instance.data["subset"],
instance.context,
instance.data["family"],
override_version
)
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, rootless_metadata_path = \
create_metadata_path(instance, anatomy)
environment = {
"AVALON_PROJECT": instance.context.data["projectName"],
"AVALON_ASSET": instance.context.data["asset"],
"AVALON_TASK": instance.context.data["task"],
"OPENPYPE_USERNAME": instance.context.data["user"],
"OPENPYPE_LOG_NO_COLORS": "1",
"IS_TEST": str(int(is_in_tests()))
}
if AYON_SERVER_ENABLED:
environment["AYON_PUBLISH_JOB"] = "1"
environment["AYON_RENDER_JOB"] = "0"
environment["AYON_REMOTE_PUBLISH"] = "0"
environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]
deadline_plugin = "Ayon"
else:
environment["OPENPYPE_PUBLISH_JOB"] = "1"
environment["OPENPYPE_RENDER_JOB"] = "0"
environment["OPENPYPE_REMOTE_PUBLISH"] = "0"
deadline_plugin = "OpenPype"
# Add OpenPype version if we are running from build.
if is_running_from_build():
self.environ_keys.append("OPENPYPE_VERSION")
# add environments from self.environ_keys
for env_key in self.environ_keys:
if os.getenv(env_key):
environment[env_key] = os.environ[env_key]
# pass environment keys from self.environ_job_filter
job_environ = job["Props"].get("Env", {})
for env_j_key in self.environ_job_filter:
if job_environ.get(env_j_key):
environment[env_j_key] = job_environ[env_j_key]
# Add mongo url if it's enabled
if instance.context.data.get("deadlinePassMongoUrl"):
mongo_url = os.environ.get("OPENPYPE_MONGO")
if mongo_url:
environment["OPENPYPE_MONGO"] = mongo_url
priority = self.deadline_priority or instance.data.get("priority", 50)
instance_settings = self.get_attr_values_from_data(instance.data)
initial_status = instance_settings.get("publishJobState", "Active")
# TODO: Remove this backwards compatibility of `suspend_publish`
if instance.data.get("suspend_publish"):
initial_status = "Suspended"
args = [
"--headless",
'publish',
'"{}"'.format(rootless_metadata_path),
"--targets", "deadline",
"--targets", "farm"
]
if is_in_tests():
args.append("--automatic-tests")
# Generate the payload for Deadline submission
secondary_pool = (
self.deadline_pool_secondary or instance.data.get("secondaryPool")
)
payload = {
"JobInfo": {
"Plugin": deadline_plugin,
"BatchName": job["Props"]["Batch"],
"Name": job_name,
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"Department": self.deadline_department,
"ChunkSize": self.deadline_chunk_size,
"Priority": priority,
"InitialStatus": initial_status,
"Group": self.deadline_group,
"Pool": self.deadline_pool or instance.data.get("primaryPool"),
"SecondaryPool": secondary_pool,
# ensure the outputdirectory with correct slashes
"OutputDirectory0": output_dir.replace("\\", "/")
},
"PluginInfo": {
"Version": self.plugin_pype_version,
"Arguments": " ".join(args),
"SingleFrameOnly": "True",
},
# Mandatory for Deadline, may be empty
"AuxFiles": [],
}
if job.get("_id"):
payload["JobInfo"]["JobDependency0"] = job["_id"]
for index, (key_, value_) in enumerate(environment.items()):
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% index: "{key}={value}".format(
key=key_, value=value_
)
}
)
# remove secondary pool
payload["JobInfo"].pop("SecondaryPool", None)
self.log.debug("Submitting Deadline publish job ...")
url = "{}/api/jobs".format(self.deadline_url)
response = requests.post(url, json=payload, timeout=10)
if not response.ok:
raise Exception(response.text)
deadline_publish_job_id = response.json()["_id"]
return deadline_publish_job_id
def process(self, instance):
# type: (pyblish.api.Instance) -> None
"""Process plugin.
Detect type of render farm submission and create and post dependent
job in case of Deadline. It creates json file with metadata needed for
publishing in directory of render.
Args:
instance (pyblish.api.Instance): Instance data.
"""
if not instance.data.get("farm"):
self.log.debug("Skipping local instance.")
return
anatomy = instance.context.data["anatomy"]
instance_skeleton_data = create_skeleton_instance_cache(instance)
"""
if the content of the `expectedFiles` list is dictionaries, we will handle
it as a list of AOVs, creating an instance for each of them.
Example:
--------
expectedFiles = [
{
"beauty": [
"foo_v01.0001.exr",
"foo_v01.0002.exr"
],
"Z": [
"boo_v01.0001.exr",
"boo_v01.0002.exr"
]
}
]
This will create instances for the `beauty` and `Z` subsets,
adding those files to their respective representations.
If we only have a list of files, we collect all file sequences.
More than one probably doesn't make sense, but we'll handle it
like creating one instance with multiple representations.
Example:
--------
expectedFiles = [
"foo_v01.0001.exr",
"foo_v01.0002.exr",
"xxx_v01.0001.exr",
"xxx_v01.0002.exr"
]
This will result in one instance with two representations:
`foo` and `xxx`
"""
if isinstance(instance.data.get("expectedFiles")[0], dict):
instances = create_instances_for_cache(
instance, instance_skeleton_data)
else:
representations = prepare_cache_representations(
instance_skeleton_data,
instance.data.get("expectedFiles"),
anatomy
)
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
# add representation
instance_skeleton_data["representations"] += representations
instances = [instance_skeleton_data]
# attach instances to subset
if instance.data.get("attachTo"):
instances = attach_instances_to_subset(
instance.data.get("attachTo"), instances
)
r''' SUBMiT PUBLiSH JOB 2 D34DLiN3
____
' ' .---. .---. .--. .---. .--..--..--..--. .---.
| | --= \ | . \/ _|/ \| . \ || || \ |/ _|
| JOB | --= / | | || __| .. | | | |;_ || \ || __|
| | |____./ \.__|._||_.|___./|_____|||__|\__|\.___|
._____.
'''
render_job = None
submission_type = ""
if instance.data.get("toBeRenderedOn") == "deadline":
render_job = instance.data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if not render_job:
import getpass
render_job = {}
self.log.debug("Faking job data ...")
render_job["Props"] = {}
# Render job doesn't exist because we do not have prior submission.
# We still use data from it so let's fake it.
#
# Batch name reflect original scene name
if instance.data.get("assemblySubmissionJobs"):
render_job["Props"]["Batch"] = instance.data.get(
"jobBatchName")
else:
batch = os.path.splitext(os.path.basename(
instance.context.data.get("currentFile")))[0]
render_job["Props"]["Batch"] = batch
# User is deadline user
render_job["Props"]["User"] = instance.context.data.get(
"deadlineUser", getpass.getuser())
deadline_publish_job_id = None
if submission_type == "deadline":
# get default deadline webservice url from deadline module
self.deadline_url = instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if instance.data.get("deadlineUrl"):
self.deadline_url = instance.data.get("deadlineUrl")
assert self.deadline_url, "Requires Deadline Webservice URL"
deadline_publish_job_id = \
self._submit_deadline_post_job(instance, render_job)
# Inject deadline url to instances.
for inst in instances:
inst["deadlineUrl"] = self.deadline_url
# publish job file
publish_job = {
"asset": instance_skeleton_data["asset"],
"frameStart": instance_skeleton_data["frameStart"],
"frameEnd": instance_skeleton_data["frameEnd"],
"fps": instance_skeleton_data["fps"],
"source": instance_skeleton_data["source"],
"user": instance.context.data["user"],
"version": instance.context.data["version"], # workfile version
"intent": instance.context.data.get("intent"),
"comment": instance.context.data.get("comment"),
"job": render_job or None,
"session": legacy_io.Session.copy(),
"instances": instances
}
if deadline_publish_job_id:
publish_job["deadline_publish_job_id"] = deadline_publish_job_id
metadata_path, rootless_metadata_path = \
create_metadata_path(instance, anatomy)
with open(metadata_path, "w") as f:
json.dump(publish_job, f, indent=4, sort_keys=True)
def _get_publish_folder(self, anatomy, template_data,
asset, subset, context,
family, version=None):
"""
Extracted logic to pre-calculate the real publish folder, which is
calculated in IntegrateNew inside the Deadline process.
This should match the logic in:
'collect_anatomy_instance_data' - to get correct anatomy, family and
version for the subset, and
'collect_resources_path' - to get publish_path
Args:
anatomy (openpype.pipeline.anatomy.Anatomy):
template_data (dict): pre-calculated collected data for process
asset (string): asset name
subset (string): subset name (actually group name of subset)
family (string): for current deadline process it's always 'render'
TODO - for generic use family needs to be dynamically
calculated like IntegrateNew does
version (int): override version from instance if exists
Returns:
(string): publish folder where rendered and published files will
be stored, based on the 'publish' template
"""
project_name = context.data["projectName"]
if not version:
version = get_last_version_by_subset_name(
project_name,
subset,
asset_name=asset
)
if version:
version = int(version["name"]) + 1
else:
version = get_versioning_start(
project_name,
template_data["app"],
task_name=template_data["task"]["name"],
task_type=template_data["task"]["type"],
family="render",
subset=subset,
project_settings=context.data["project_settings"]
)
host_name = context.data["hostName"]
task_info = template_data.get("task") or {}
template_name = publish.get_publish_template_name(
project_name,
host_name,
family,
task_info.get("name"),
task_info.get("type"),
)
template_data["subset"] = subset
template_data["family"] = family
template_data["version"] = version
render_templates = anatomy.templates_obj[template_name]
if "folder" in render_templates:
publish_folder = render_templates["folder"].format_strict(
template_data
)
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = render_templates["path"].format_strict(template_data)
publish_folder = os.path.dirname(file_path)
return publish_folder
@classmethod
def get_attribute_defs(cls):
return [
EnumDef("publishJobState",
label="Publish Job State",
items=["Active", "Suspended"],
default="Active")
]
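
For context, the metadata json written by `process` above lands next to the rendered or cached output and is what the spawned Deadline publish job reads back. A minimal sketch of consuming it (the file path is an assumption; `create_metadata_path` decides the real location, and the keys mirror the `publish_job` dict above):

    import json

    # hypothetical location of the metadata file written by this plugin
    with open("/renders/shot010/pointcacheMain_metadata.json") as f:
        publish_job = json.load(f)

    print(publish_job["asset"], publish_job["version"])
    for inst in publish_job["instances"]:
        # every instance carries the representations to be integrated
        names = [repre["name"] for repre in inst["representations"]]
        print(inst["subset"], names)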

View file

@ -22,7 +22,8 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
"render.frames_farm",
"renderFarm",
"renderlayer",
"maxrender"]
"maxrender",
"publish.hou"]
optional = True
# cache

View file

@ -61,6 +61,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
additional_metadata_keys = []
def process(self, instance):
# QUESTION: should this be operating even for `farm` target?
self.log.debug("instance {}".format(instance))
instance_repres = instance.data.get("representations")
@ -143,70 +144,93 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
unmanaged_location_name = "ftrack.unmanaged"
ftrack_server_location_name = "ftrack.server"
# check if any outputName keys are in review_representations
# also check if any outputName keys are in thumbnail_representations
synced_multiple_output_names = []
for review_repre in review_representations:
review_output_name = review_repre.get("outputName")
if not review_output_name:
continue
for thumb_repre in thumbnail_representations:
thumb_output_name = thumb_repre.get("outputName")
if not thumb_output_name:
continue
if (
thumb_output_name == review_output_name
# output name can also be added as a tag during intermediate
# file creation
or thumb_output_name in review_repre.get("tags", [])
):
synced_multiple_output_names.append(
thumb_repre["outputName"])
self.log.debug("Multiple output names: {}".format(
synced_multiple_output_names
))
multiple_synced_thumbnails = len(synced_multiple_output_names) > 1
# Components data
component_list = []
# Components that will be duplicated to unmanaged location
src_components_to_add = []
thumbnail_data_items = []
# Create thumbnail components
# TODO what if there is multiple thumbnails?
first_thumbnail_component = None
first_thumbnail_component_repre = None
if not review_representations or has_movie_review:
for repre in thumbnail_representations:
repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
)
continue
# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item["component_path"] = repre_path
thumbnail_item["component_data"] = {
"name": "thumbnail"
}
thumbnail_item["thumbnail"] = True
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(thumbnail_item))
# Create copy of first thumbnail
if first_thumbnail_component is None:
first_thumbnail_component_repre = repre
first_thumbnail_component = thumbnail_item
# Set location
thumbnail_item["component_location_name"] = (
ftrack_server_location_name
)
# Add item to component list
component_list.append(thumbnail_item)
if first_thumbnail_component is not None:
metadata = self._prepare_image_component_metadata(
first_thumbnail_component_repre,
first_thumbnail_component["component_path"]
for repre in thumbnail_representations:
# get repre path from representation
# and return published_path if available
# the path is validated; if it does not exist, None is returned
repre_path = get_publish_repre_path(
instance,
repre,
only_published=False
)
if not repre_path:
self.log.warning(
"Published path is not set or source was removed."
)
continue
if metadata:
component_data = first_thumbnail_component["component_data"]
component_data["metadata"] = metadata
# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item.update({
"component_path": repre_path,
"component_data": {
"name": (
"thumbnail" if review_representations
else "ftrackreview-image"
),
"metadata": self._prepare_image_component_metadata(
repre,
repre_path
)
},
"thumbnail": True,
"component_location_name": ftrack_server_location_name
})
if review_representations:
component_data["name"] = "thumbnail"
else:
component_data["name"] = "ftrackreview-image"
# add thumbnail data to items for future synchronization
current_item_data = {
"sync_key": repre.get("outputName"),
"representation": repre,
"item": thumbnail_item
}
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_comp = self._create_src_component(
instance,
repre,
copy.deepcopy(thumbnail_item),
unmanaged_location_name
)
component_list.append(src_comp)
current_item_data["src_component"] = src_comp
# Add item to component list
thumbnail_data_items.append(current_item_data)
# Create review components
# Change asset name of each new component for review
is_first_review_repre = True
not_first_components = []
extended_asset_name = ""
multiple_reviewable = len(review_representations) > 1
for repre in review_representations:
for index, repre in enumerate(review_representations):
if not self._is_repre_video(repre) and has_movie_review:
self.log.debug("Movie repre has priority "
"from {}".format(repre))
@ -222,45 +246,50 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
# Create copy of base comp item and append it
review_item = copy.deepcopy(base_component_item)
# get asset name and define extended name variant
asset_name = review_item["asset_data"]["name"]
extended_asset_name = "_".join(
(asset_name, repre["name"])
# get first or synchronize thumbnail item
sync_thumbnail_item = None
sync_thumbnail_item_src = None
sync_thumbnail_data = self._get_matching_thumbnail_item(
repre,
thumbnail_data_items,
multiple_synced_thumbnails
)
if sync_thumbnail_data:
sync_thumbnail_item = sync_thumbnail_data.get("item")
sync_thumbnail_item_src = sync_thumbnail_data.get(
"src_component")
# reset extended if no need for extended asset name
if (
self.keep_first_subset_name_for_review
and is_first_review_repre
):
extended_asset_name = ""
else:
# only rename if multiple reviewable
if multiple_reviewable:
review_item["asset_data"]["name"] = extended_asset_name
else:
extended_asset_name = ""
"""
Rename only those components which are explicitly allowed in
settings. Clients usually want to keep the first component untouched,
as the product name with version, and have any other assetVersion
named with the extended form. Renaming only happens if there is
more than one reviewable component and the extended name is not
empty.
"""
extended_asset_name = self._make_extended_component_name(
base_component_item, repre, index)
# rename all already created components
# only if first repre and extended name available
if is_first_review_repre and extended_asset_name:
# and rename all already created components
for _ci in component_list:
_ci["asset_data"]["name"] = extended_asset_name
if multiple_reviewable and extended_asset_name:
review_item["asset_data"]["name"] = extended_asset_name
# rename also thumbnail
if sync_thumbnail_item:
sync_thumbnail_item["asset_data"]["name"] = (
extended_asset_name
)
# rename also src_thumbnail
if sync_thumbnail_item_src:
sync_thumbnail_item_src["asset_data"]["name"] = (
extended_asset_name
)
# and rename all already created src components
for _sci in src_components_to_add:
_sci["asset_data"]["name"] = extended_asset_name
# rename also first thumbnail component if any
if first_thumbnail_component is not None:
first_thumbnail_component[
"asset_data"]["name"] = extended_asset_name
# Change location
review_item["component_path"] = repre_path
# Change component data
# adding thumbnail component to component list
if sync_thumbnail_item:
component_list.append(copy.deepcopy(sync_thumbnail_item))
if sync_thumbnail_item_src:
component_list.append(copy.deepcopy(sync_thumbnail_item_src))
# add metadata to review component
if self._is_repre_video(repre):
component_name = "ftrackreview-mp4"
metadata = self._prepare_video_component_metadata(
@ -273,28 +302,29 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
review_item["thumbnail"] = True
review_item["component_data"] = {
# Default component name is "main".
"name": component_name,
"metadata": metadata
}
if is_first_review_repre:
is_first_review_repre = False
else:
# later detection for thumbnail duplication
not_first_components.append(review_item)
review_item.update({
"component_path": repre_path,
"component_data": {
"name": component_name,
"metadata": metadata
},
"component_location_name": ftrack_server_location_name
})
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(review_item))
src_comp = self._create_src_component(
instance,
repre,
copy.deepcopy(review_item),
unmanaged_location_name
)
component_list.append(src_comp)
# Set location
review_item["component_location_name"] = (
ftrack_server_location_name
)
# Add item to component list
component_list.append(review_item)
if self.upload_reviewable_with_origin_name:
origin_name_component = copy.deepcopy(review_item)
filename = os.path.basename(repre_path)
@ -303,34 +333,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
component_list.append(origin_name_component)
# Duplicate thumbnail component for all not first reviews
if first_thumbnail_component is not None:
for component_item in not_first_components:
asset_name = component_item["asset_data"]["name"]
new_thumbnail_component = copy.deepcopy(
first_thumbnail_component
)
new_thumbnail_component["asset_data"]["name"] = asset_name
new_thumbnail_component["component_location_name"] = (
ftrack_server_location_name
)
component_list.append(new_thumbnail_component)
# Add source components for review and thumbnail components
for copy_src_item in src_components_to_add:
# Make sure thumbnail is disabled
copy_src_item["thumbnail"] = False
# Set location
copy_src_item["component_location_name"] = unmanaged_location_name
# Modify name of component to have suffix "_src"
component_data = copy_src_item["component_data"]
component_name = component_data["name"]
component_data["name"] = component_name + "_src"
component_data["metadata"] = self._prepare_component_metadata(
instance, repre, copy_src_item["component_path"], False
)
component_list.append(copy_src_item)
# Add others representations as component
for repre in other_representations:
published_path = get_publish_repre_path(instance, repre, True)
@ -346,15 +348,17 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
):
other_item["asset_data"]["name"] = extended_asset_name
component_data = {
"name": repre["name"],
"metadata": self._prepare_component_metadata(
instance, repre, published_path, False
)
}
other_item["component_data"] = component_data
other_item["component_location_name"] = unmanaged_location_name
other_item["component_path"] = published_path
other_item.update({
"component_path": published_path,
"component_data": {
"name": repre["name"],
"metadata": self._prepare_component_metadata(
instance, repre, published_path, False
)
},
"component_location_name": unmanaged_location_name,
})
component_list.append(other_item)
def json_obj_parser(obj):
@ -370,6 +374,124 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
))
instance.data["ftrackComponentsList"] = component_list
def _get_matching_thumbnail_item(
self,
review_representation,
thumbnail_data_items,
are_multiple_synced_thumbnails
):
"""Return matching thumbnail item from list of thumbnail items.
If a thumbnail item already exists, this should return it.
The benefit is that if an `outputName` key is found in
representation and is also used as a `sync_key` in a thumbnail
data item, it can sync with that item.
Args:
review_representation (dict): Review representation
thumbnail_data_items (list): List of thumbnail data items
are_multiple_synced_thumbnails (bool): If there are multiple synced
thumbnails
Returns:
dict: Thumbnail data item or empty dict
"""
output_name = review_representation.get("outputName")
tags = review_representation.get("tags", [])
matching_thumbnail_item = {}
for thumb_item in thumbnail_data_items:
if (
are_multiple_synced_thumbnails
and (
thumb_item["sync_key"] == output_name
# intermediate files can have preset name in tags
# this is usually aligned with `outputName` distributed
# during thumbnail creation in `need_thumbnail` tagging
# workflow
or thumb_item["sync_key"] in tags
)
):
# return only synchronized thumbnail if multiple
matching_thumbnail_item = thumb_item
break
elif not are_multiple_synced_thumbnails:
# return the first thumbnail found since we need a thumbnail
# but don't care which one
matching_thumbnail_item = thumb_item
break
if not matching_thumbnail_item:
# WARNING: this can only happen if the multiple-thumbnails workflow
# is broken: multiple matching outputName values were found in the
# representation but none of them align with any thumbnail item
self.log.warning(
"No matching thumbnail item found for output name "
"'{}'".format(output_name)
)
if not thumbnail_data_items:
self.log.warning(
"No thumbnail data items found"
)
return {}
# as fallback return first thumbnail item
return thumbnail_data_items[0]
return matching_thumbnail_item
def _make_extended_component_name(
self, component_item, repre, iteration_index):
""" Returns the extended component name
Name is based on the asset name and representation name.
Args:
component_item (dict): The component item dictionary.
repre (dict): The representation dictionary.
iteration_index (int): The index of the iteration.
Returns:
str: The extended component name.
"""
# reset extended if no need for extended asset name
if self.keep_first_subset_name_for_review and iteration_index == 0:
return
# get asset name and define extended name variant
asset_name = component_item["asset_data"]["name"]
return "_".join(
(asset_name, repre["name"])
)
def _create_src_component(
self, instance, repre, component_item, location):
"""Create src component for thumbnail.
This will replicate the input component and change its name to
have suffix "_src".
Args:
instance (pyblish.api.Instance): Instance
repre (dict): Representation
component_item (dict): Component item
location (str): Location name
Returns:
dict: Component item
"""
# Make sure thumbnail is disabled
component_item["thumbnail"] = False
# Set location
component_item["component_location_name"] = location
# Modify name of component to have suffix "_src"
component_data = component_item["component_data"]
component_name = component_data["name"]
component_data["name"] = component_name + "_src"
component_data["metadata"] = self._prepare_component_metadata(
instance, repre, component_item["component_path"], False
)
return component_item
def _collect_additional_metadata(self, streams):
pass
@ -472,9 +594,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
stream_width = tmp_width
stream_height = tmp_height
self.log.debug("FPS from stream is {} and duration is {}".format(
input_framerate, stream_duration
))
frame_out = float(stream_duration) * stream_fps
break
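
For orientation, every entry appended to `component_list` in this plugin is a plain dict with the same handful of keys. A rough sketch of the shape built above, with placeholder values only:

    review_item = {
        "asset_data": {"name": "renderMain"},           # placeholder asset name
        "component_path": "/publish/render_h264.mp4",   # placeholder path
        "component_data": {
            "name": "ftrackreview-mp4",
            "metadata": {}   # filled by _prepare_video_component_metadata()
        },
        "thumbnail": False,
        "component_location_name": "ftrack.server"
    }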

View file

@ -745,6 +745,238 @@ def get_resources(project_name, version, extension=None):
return resources
def create_skeleton_instance_cache(instance):
# type: (pyblish.api.Instance, list, dict) -> dict
"""Create skeleton instance from original instance data.
This will create a dictionary containing the skeleton - the common
data used for publishing instances processed on the farm.
The skeleton instance is then extended with additional data
and serialized to be processed by the farm job.
Args:
instance (pyblish.api.Instance): Original instance to
be used as a source of data.
Returns:
dict: Dictionary with skeleton instance data.
"""
# list of family names to transfer to new family if present
context = instance.context
data = instance.data.copy()
anatomy = instance.context.data["anatomy"] # type: Anatomy
# get time related data from instance (or context)
time_data = get_time_data_from_instance_or_context(instance)
if data.get("extendFrames", False):
time_data.start, time_data.end = extend_frames(
data["asset"],
data["subset"],
time_data.start,
time_data.end,
)
source = data.get("source") or context.data.get("currentFile")
success, rootless_path = (
anatomy.find_root_template_from_path(source)
)
if success:
source = rootless_path
else:
# `rootless_path` is not set to `source` if none of roots match
log = Logger.get_logger("farm_publishing")
log.warning(("Could not find root path for remapping \"{}\". "
"This may cause issues.").format(source))
family = instance.data["family"]
# Make sure "render" is in the families to go through
# validating expected and rendered files
# during publishing job.
families = ["render", family]
instance_skeleton_data = {
"family": family,
"subset": data["subset"],
"families": families,
"asset": data["asset"],
"frameStart": time_data.start,
"frameEnd": time_data.end,
"handleStart": time_data.handle_start,
"handleEnd": time_data.handle_end,
"frameStartHandle": time_data.start - time_data.handle_start,
"frameEndHandle": time_data.end + time_data.handle_end,
"comment": data.get("comment"),
"fps": time_data.fps,
"source": source,
"extendFrames": data.get("extendFrames"),
"overrideExistingFrame": data.get("overrideExistingFrame"),
"jobBatchName": data.get("jobBatchName", ""),
# map inputVersions `ObjectId` -> `str` so json supports it
"inputVersions": list(map(str, data.get("inputVersions", []))),
}
# skip locking version if we are creating v01
instance_version = data.get("version") # take this if exists
if instance_version != 1:
instance_skeleton_data["version"] = instance_version
representations = get_transferable_representations(instance)
instance_skeleton_data["representations"] = representations
persistent = instance.data.get("stagingDir_persistent") is True
instance_skeleton_data["stagingDir_persistent"] = persistent
return instance_skeleton_data
def prepare_cache_representations(skeleton_data, exp_files, anatomy):
"""Create representations for file sequences.
This will return representations of expected files if they are not
in a hierarchy of AOVs. In most cases there should be only one
sequence of files, but if not, we create a representation from each of them.
Arguments:
skeleton_data (dict): instance data for which we are
setting representations
exp_files (list): list of expected files
anatomy (Anatomy)
Returns:
list of representations
"""
representations = []
collections, remainders = clique.assemble(exp_files)
log = Logger.get_logger("farm_publishing")
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
staging = os.path.dirname(list(collection)[0])
success, rootless_staging_dir = (
anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
frame_start = int(skeleton_data.get("frameStartHandle"))
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(collection)],
"frameStart": frame_start,
"frameEnd": int(skeleton_data.get("frameEndHandle")),
# If expectedFiles are absolute, we need only filenames
"stagingDir": staging,
"fps": skeleton_data.get("fps")
}
representations.append(rep)
return representations
def create_instances_for_cache(instance, skeleton):
"""Create instance for cache.
This will create a new instance for every AOV it can detect in the
expected files list.
Args:
instance (pyblish.api.Instance): Original instance.
skeleton (dict): Skeleton data for the instance (the data needed
later by the collector).
Returns:
list of instances
Raises:
ValueError: If the expected files cannot be resolved into a single sequence.
"""
anatomy = instance.context.data["anatomy"]
subset = skeleton["subset"]
family = skeleton["family"]
exp_files = instance.data["expectedFiles"]
log = Logger.get_logger("farm_publishing")
instances = []
# go through AOVs in expected files
for _, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any remainders. And if we do, it should
# be just one item for single frame renders.
if not cols and rem:
if len(rem) != 1:
raise ValueError("Found multiple non related files "
"to render, don't know what to do "
"with them.")
col = rem[0]
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else makes sense.
if len(cols) != 1:
raise ValueError("Only one image sequence type is expected.") # noqa: E501
ext = cols[0].tail.lstrip(".")
col = list(cols[0])
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])
else:
staging = os.path.dirname(col)
try:
staging = remap_source(staging, anatomy)
except ValueError as e:
log.warning(e)
new_instance = deepcopy(skeleton)
new_instance["subset"] = subset
log.info("Creating data for: {}".format(subset))
new_instance["family"] = family
new_instance["families"] = skeleton["families"]
# create representation
if isinstance(col, (list, tuple)):
files = [os.path.basename(f) for f in col]
else:
files = os.path.basename(col)
rep = {
"name": ext,
"ext": ext,
"files": files,
"frameStart": int(skeleton["frameStartHandle"]),
"frameEnd": int(skeleton["frameEndHandle"]),
# If expectedFiles are absolute, we need only filenames
"stagingDir": staging,
"fps": new_instance.get("fps"),
"tags": [],
}
new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
if new_instance.get("extendFrames", False):
copy_extend_frames(new_instance, rep)
instances.append(new_instance)
log.debug("instances:{}".format(instances))
return instances
def copy_extend_frames(instance, representation):
"""Copy existing frames from latest version.

View file

@ -109,6 +109,14 @@
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedCacheJobOnFarm": {
"enabled": true,
"deadline_department": "",
"deadline_pool": "",
"deadline_group": "",
"deadline_chunk_size": 1,
"deadline_priority": 50
},
"ProcessSubmittedJobOnFarm": {
"enabled": true,
"deadline_department": "",

View file

@ -137,6 +137,11 @@
}
},
"publish": {
"CollectChunkSize": {
"enabled": true,
"optional": true,
"chunk_size": 999999
},
"CollectAssetHandles": {
"use_asset_handles": true
},

View file

@ -210,5 +210,8 @@
"darwin": "",
"linux": ""
}
},
"asset_reporter": {
"enabled": false
}
}

View file

@ -584,6 +584,46 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ProcessSubmittedCacheJobOnFarm",
"label": "ProcessSubmittedCacheJobOnFarm",
"checkbox_key": "enabled",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "text",
"key": "deadline_department",
"label": "Deadline department"
},
{
"type": "text",
"key": "deadline_pool",
"label": "Deadline Pool"
},
{
"type": "text",
"key": "deadline_group",
"label": "Deadline Group"
},
{
"type": "number",
"key": "deadline_chunk_size",
"label": "Deadline Chunk Size"
},
{
"type": "number",
"key": "deadline_priority",
"label": "Deadline Priotity"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -55,6 +55,31 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"checkbox_key": "enabled",
"key": "CollectChunkSize",
"label": "Collect Chunk Size",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "number",
"key": "chunk_size",
"label": "Frames Per Task"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -355,6 +355,20 @@
{
"type": "dynamic_schema",
"name": "system_settings/modules"
},
{
"type": "dict",
"key": "asset_reporter",
"label": "Asset Usage Reporter",
"collapsible": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}
]
}
]
}

View file

@ -172,7 +172,7 @@ def save_studio_settings(data):
clear_metadata_from_settings(new_data)
changes = calculate_changes(old_data, new_data)
modules_manager = ModulesManager(_system_settings=new_data)
modules_manager = ModulesManager(new_data)
warnings = []
for module in modules_manager.get_enabled_modules():

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.7-nightly.2"
__version__ = "3.17.7-nightly.3"

View file

@ -248,6 +248,17 @@ class AOVFilterSubmodel(BaseSettingsModel):
)
class ProcessCacheJobFarmModel(BaseSettingsModel):
"""Process submitted job on farm."""
enabled: bool = Field(title="Enabled")
deadline_department: str = Field(title="Department")
deadline_pool: str = Field(title="Pool")
deadline_group: str = Field(title="Group")
deadline_chunk_size: int = Field(title="Chunk Size")
deadline_priority: int = Field(title="Priority")
class ProcessSubmittedJobOnFarmModel(BaseSettingsModel):
"""Process submitted job on farm."""
@ -311,6 +322,9 @@ class PublishPluginsModel(BaseSettingsModel):
BlenderSubmitDeadline: BlenderSubmitDeadlineModel = Field(
default_factory=BlenderSubmitDeadlineModel,
title="Blender Submit Deadline")
ProcessSubmittedCacheJobOnFarm: ProcessCacheJobFarmModel = Field(
default_factory=ProcessCacheJobFarmModel,
title="Process submitted cache Job on farm.")
ProcessSubmittedJobOnFarm: ProcessSubmittedJobOnFarmModel = Field(
default_factory=ProcessSubmittedJobOnFarmModel,
title="Process submitted job on farm.")
@ -426,6 +440,14 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedCacheJobOnFarm": {
"enabled": True,
"deadline_department": "",
"deadline_pool": "",
"deadline_group": "",
"deadline_chunk_size": 1,
"deadline_priority": 50
},
"ProcessSubmittedJobOnFarm": {
"enabled": True,
"deadline_department": "",

View file

@ -1 +1 @@
__version__ = "0.1.3"
__version__ = "0.1.4"

View file

@ -85,3 +85,11 @@ def pytest_runtest_makereport(item, call):
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
# In the event of module scoped fixtures, also mark failure in module.
module = item
while module is not None and not isinstance(module, pytest.Module):
module = module.parent
if module is not None:
if rep.when == 'call' and (rep.failed or rep.skipped):
module.module_test_failure = True
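
The `module_test_failure` flag set by this hook is what `is_test_failed` below now checks. A sketch of how a module-scoped fixture could use the same flag to keep temporary data around for debugging (fixture name and behaviour are assumptions, not part of the test suite):

    import pytest

    @pytest.fixture(scope="module")
    def tmp_workdir(request, tmp_path_factory):
        workdir = tmp_path_factory.mktemp("publish_test")
        yield workdir
        # keep the folder for inspection if anything in the module failed
        if getattr(request.node, "module_test_failure", False):
            print("Keeping {} for debugging".format(workdir))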

View file

@ -218,11 +218,7 @@ class ModuleUnitTest(BaseTest):
yield mongo_client[self.TEST_OPENPYPE_NAME]["settings"]
def is_test_failed(self, request):
# if request.node doesn't have rep_call, something failed
try:
return request.node.rep_call.failed
except AttributeError:
return True
return getattr(request.node, "module_test_failure", False)
class PublishTest(ModuleUnitTest):

View file

@ -83,6 +83,30 @@ select your render camera.
All the render outputs are stored in the pyblish/render directory within your project path.\
For Karma-specific render, it also outputs the USD render as default.
## Publishing cache to Deadline
Artists can publish caches to Deadline, which increases productivity because the
local machine stays free for other tasks while the farm does the caching.
Caching on the farm is supported for:
- **Arnold ASS (.ass)**
- **Pointcache (.bgeo and .abc)**
- **VDB (.vdb)**
- **Redshift Proxy (.rs)**
To submit your cache to Deadline, create the instance(s) with **Submitting to Farm**
enabled. You can also enable **Use selection** to cache only the selected objects on the farm.
![Houdini Farm Cache Creator](assets/houdini_farm_cache_creator.png)
When you go to the Publish tab and select the instance(s), you can set your preferred
**Frames per task**.
![Houdini Farm Per Task](assets/houdini_frame_per_task.png)
Once you hit **Publish**, the cache is submitted and processed on Deadline.
When the job finishes, all the caches are located in your publish folder.
You can see them in the Loader.
![Houdini Farm Cache Loader](assets/houdini_farm_cache_loader.png)
## USD (experimental support)
### Publishing USD
You can publish your Solaris Stage as USD file.

Binary file not shown (added, 93 KiB)

Binary file not shown (added, 132 KiB)

Binary file not shown (added, 58 KiB)