Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit 5e5db076d1 (parent 2c18900940): Remove code unrelated to this pull request

32 changed files with 1380 additions and 107 deletions

@@ -138,6 +138,9 @@ def get_output_parameter(node):
             return node.parm("ar_ass_file")
     elif node_type == "Redshift_Proxy_Output":
         return node.parm("RS_archive_file")
+    elif node_type == "ifd":
+        if node.evalParm("soho_outputmode"):
+            return node.parm("soho_diskfile")
 
     raise TypeError("Node type '%s' not supported" % node_type)

@@ -649,50 +652,3 @@ def get_color_management_preferences():
         "display": hou.Color.ocio_defaultDisplay(),
         "view": hou.Color.ocio_defaultView()
     }
-
-
-def get_resolution_from_doc(doc):
-    """Get resolution from the given asset document. """
-
-    if not doc or "data" not in doc:
-        print("Entered document is not valid. \"{}\"".format(str(doc)))
-        return None
-
-    resolution_width = doc["data"].get("resolutionWidth")
-    resolution_height = doc["data"].get("resolutionHeight")
-
-    # Make sure both width and height are set
-    if resolution_width is None or resolution_height is None:
-        print("No resolution information found for \"{}\"".format(doc["name"]))
-        return None
-
-    return int(resolution_width), int(resolution_height)
-
-
-def set_camera_resolution(camera, asset_doc=None):
-    """Apply resolution to camera from asset document of the publish"""
-
-    if not asset_doc:
-        asset_doc = get_current_project_asset()
-
-    resolution = get_resolution_from_doc(asset_doc)
-
-    if resolution:
-        print("Setting camera resolution: {} -> {}x{}".format(
-            camera.name(), resolution[0], resolution[1]
-        ))
-        camera.parm("resx").set(resolution[0])
-        camera.parm("resy").set(resolution[1])
-
-
-def get_camera_from_container(container):
-    """Get camera from container node. """
-
-    cameras = container.recursiveGlob(
-        "*",
-        filter=hou.nodeTypeFilter.ObjCamera,
-        include_subnets=False
-    )
-
-    assert len(cameras) == 1, "Camera instance must have only one camera"
-    return cameras[0]

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating Arnold ASS files."""
 from openpype.hosts.houdini.api import plugin
+from openpype.lib import BoolDef
 
 
 class CreateArnoldAss(plugin.HoudiniCreator):

@@ -21,6 +22,9 @@ class CreateArnoldAss(plugin.HoudiniCreator):
 
         instance_data.pop("active", None)
         instance_data.update({"node_type": "arnold"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
 
         instance = super(CreateArnoldAss, self).create(
             subset_name,

@@ -52,3 +56,15 @@ class CreateArnoldAss(plugin.HoudiniCreator):
         # Lock any parameters in this list
         to_lock = ["ar_ass_export_enable", "family", "id"]
         self.lock_parameters(instance_node, to_lock)
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()

@@ -2,7 +2,7 @@
 """Creator plugin for creating pointcache bgeo files."""
 from openpype.hosts.houdini.api import plugin
 from openpype.pipeline import CreatedInstance, CreatorError
-from openpype.lib import EnumDef
+from openpype.lib import EnumDef, BoolDef
 
 
 class CreateBGEO(plugin.HoudiniCreator):

@@ -18,6 +18,9 @@ class CreateBGEO(plugin.HoudiniCreator):
         instance_data.pop("active", None)
 
         instance_data.update({"node_type": "geometry"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
 
         instance = super(CreateBGEO, self).create(
             subset_name,

@@ -58,6 +61,13 @@ class CreateBGEO(plugin.HoudiniCreator):
 
         instance_node.setParms(parms)
 
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
     def get_pre_create_attr_defs(self):
         attrs = super().get_pre_create_attr_defs()
         bgeo_enum = [

@@ -88,5 +98,5 @@ class CreateBGEO(plugin.HoudiniCreator):
         ]
 
         return attrs + [
-            EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
-        ]
+            EnumDef("bgeo_type", bgeo_enum, label="BGEO Options")
+        ] + self.get_instance_attr_defs()

openpype/hosts/houdini/plugins/create/create_mantra_ifd.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Mantra IFD files."""
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef
+
+
+class CreateMantraIFD(plugin.HoudiniCreator):
+    """Mantra .ifd Archive"""
+    identifier = "io.openpype.creators.houdini.mantraifd"
+    label = "Mantra IFD"
+    family = "mantraifd"
+    icon = "gears"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        import hou
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "ifd"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
+        instance = super(CreateMantraIFD, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+
+        instance_node = hou.node(instance.get("instance_node"))
+
+        filepath = "{}{}".format(
+            hou.text.expandString("$HIP/pyblish/"),
+            "{}.$F4.ifd".format(subset_name))
+        parms = {
+            # Render frame range
+            "trange": 1,
+            # Mantra ROP settings
+            "soho_diskfile": filepath,
+            "soho_outputmode": 1
+        }
+
+        instance_node.setParms(parms)
+
+        # Lock any parameters in this list
+        to_lock = ["soho_outputmode", "family", "id"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()

@@ -1,10 +1,12 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating pointcache alembics."""
 from openpype.hosts.houdini.api import plugin
+from openpype.lib import BoolDef
+
 import hou
 
 
 class CreatePointCache(plugin.HoudiniCreator):
     """Alembic ROP to pointcache"""
     identifier = "io.openpype.creators.houdini.pointcache"

@@ -15,6 +17,9 @@ class CreatePointCache(plugin.HoudiniCreator):
     def create(self, subset_name, instance_data, pre_create_data):
         instance_data.pop("active", None)
         instance_data.update({"node_type": "alembic"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
 
         instance = super(CreatePointCache, self).create(
             subset_name,

@@ -105,3 +110,15 @@ class CreatePointCache(plugin.HoudiniCreator):
         else:
             return min(outputs,
                        key=lambda node: node.evalParm('outputidx'))
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()

@@ -2,6 +2,7 @@
 """Creator plugin for creating Redshift proxies."""
 from openpype.hosts.houdini.api import plugin
 from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef
 
 
 class CreateRedshiftProxy(plugin.HoudiniCreator):

@@ -24,6 +25,9 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
         # TODO: Somehow enforce so that it only shows the original limited
         # attributes of the Redshift_Proxy_Output node type
         instance_data.update({"node_type": "Redshift_Proxy_Output"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
 
         instance = super(CreateRedshiftProxy, self).create(
             subset_name,

@@ -44,3 +48,15 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
         # Lock some Avalon attributes
         to_lock = ["family", "id", "prim_to_detail_pattern"]
         self.lock_parameters(instance_node, to_lock)
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()

@@ -2,6 +2,7 @@
 """Creator plugin for creating VDB Caches."""
 from openpype.hosts.houdini.api import plugin
 from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef
 
 import hou
 

@@ -19,15 +20,20 @@ class CreateVDBCache(plugin.HoudiniCreator):
 
         instance_data.pop("active", None)
         instance_data.update({"node_type": "geometry"})
-
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
         instance = super(CreateVDBCache, self).create(
             subset_name,
             instance_data,
             pre_create_data)  # type: CreatedInstance
 
         instance_node = hou.node(instance.get("instance_node"))
+        file_path = "{}{}".format(
+            hou.text.expandString("$HIP/pyblish/"),
+            "{}.$F4.vdb".format(subset_name))
         parms = {
-            "sopoutput": "$HIP/pyblish/{}.$F4.vdb".format(subset_name),
+            "sopoutput": file_path,
             "initsim": True,
             "trange": 1
         }

@@ -102,3 +108,15 @@ class CreateVDBCache(plugin.HoudiniCreator):
         else:
             return min(outputs,
                        key=lambda node: node.evalParm('outputidx'))
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()

@@ -4,13 +4,6 @@ from openpype.pipeline import (
 )
 from openpype.hosts.houdini.api import pipeline
 
-from openpype.hosts.houdini.api.lib import (
-    set_camera_resolution,
-    get_camera_from_container
-)
-
 import hou
 
 
 ARCHIVE_EXPRESSION = ('__import__("_alembic_hom_extensions")'
                       '.alembicGetCameraDict')

@@ -32,15 +25,7 @@ def transfer_non_default_values(src, dest, ignore=None):
     channel expression and ignore certain Parm types.
 
     """
-    ignore_types = {
-        hou.parmTemplateType.Toggle,
-        hou.parmTemplateType.Menu,
-        hou.parmTemplateType.Button,
-        hou.parmTemplateType.FolderSet,
-        hou.parmTemplateType.Separator,
-        hou.parmTemplateType.Label,
-    }
     import hou
 
     src.updateParmStates()
 

@@ -77,6 +62,14 @@ def transfer_non_default_values(src, dest, ignore=None):
             continue
 
         # Ignore folders, separators, etc.
+        ignore_types = {
+            hou.parmTemplateType.Toggle,
+            hou.parmTemplateType.Menu,
+            hou.parmTemplateType.Button,
+            hou.parmTemplateType.FolderSet,
+            hou.parmTemplateType.Separator,
+            hou.parmTemplateType.Label,
+        }
         if parm.parmTemplate().type() in ignore_types:
             continue
 

@@ -97,8 +90,13 @@ class CameraLoader(load.LoaderPlugin):
 
     def load(self, context, name=None, namespace=None, data=None):
 
+        import os
+        import hou
+
         # Format file name, Houdini only wants forward slashes
-        file_path = self.filepath_from_context(context).replace("\\", "/")
+        file_path = self.filepath_from_context(context)
+        file_path = os.path.normpath(file_path)
+        file_path = file_path.replace("\\", "/")
 
         # Get the root node
         obj = hou.node("/obj")

@@ -108,21 +106,19 @@ class CameraLoader(load.LoaderPlugin):
         node_name = "{}_{}".format(namespace, name) if namespace else name
 
         # Create an archive node
-        node = self.create_and_connect(obj, "alembicarchive", node_name)
+        container = self.create_and_connect(obj, "alembicarchive", node_name)
 
         # TODO: add FPS of project / asset
-        node.setParms({"fileName": file_path, "channelRef": True})
+        container.setParms({"fileName": file_path,
+                            "channelRef": True})
 
         # Apply some magic
-        node.parm("buildHierarchy").pressButton()
-        node.moveToGoodPosition()
+        container.parm("buildHierarchy").pressButton()
+        container.moveToGoodPosition()
 
         # Create an alembic xform node
-        nodes = [node]
+        nodes = [container]
 
-        camera = get_camera_from_container(node)
-        self._match_maya_render_mask(camera)
-        set_camera_resolution(camera, asset_doc=context["asset"])
         self[:] = nodes
 
         return pipeline.containerise(node_name,

@@ -147,14 +143,14 @@ class CameraLoader(load.LoaderPlugin):
         # Store the cam temporarily next to the Alembic Archive
         # so that we can preserve parm values the user set on it
         # after build hierarchy was triggered.
-        old_camera = get_camera_from_container(node)
+        old_camera = self._get_camera(node)
         temp_camera = old_camera.copyTo(node.parent())
 
         # Rebuild
         node.parm("buildHierarchy").pressButton()
 
         # Apply values to the new camera
-        new_camera = get_camera_from_container(node)
+        new_camera = self._get_camera(node)
         transfer_non_default_values(temp_camera,
                                     new_camera,
                                     # The hidden uniform scale attribute

@@ -162,9 +158,6 @@ class CameraLoader(load.LoaderPlugin):
                                     # "icon_scale" just skip that completely
                                     ignore={"scale"})
 
-        self._match_maya_render_mask(new_camera)
-        set_camera_resolution(new_camera)
-
         temp_camera.destroy()
 
     def remove(self, container):

@@ -172,6 +165,15 @@ class CameraLoader(load.LoaderPlugin):
         node = container["node"]
         node.destroy()
 
+    def _get_camera(self, node):
+        import hou
+        cameras = node.recursiveGlob("*",
+                                     filter=hou.nodeTypeFilter.ObjCamera,
+                                     include_subnets=False)
+
+        assert len(cameras) == 1, "Camera instance must have only one camera"
+        return cameras[0]
+
     def create_and_connect(self, node, node_type, name=None):
         """Create a node within a node and connect it to the input
 

@@ -192,20 +194,5 @@ class CameraLoader(load.LoaderPlugin):
         new_node.moveToGoodPosition()
         return new_node
 
-    def _match_maya_render_mask(self, camera):
-        """Workaround to match Maya render mask in Houdini"""
-
-        # print("Setting match maya render mask ")
-        parm = camera.parm("aperture")
-        expression = parm.expression()
-        expression = expression.replace("return ", "aperture = ")
-        expression += """
-# Match maya render mask (logic from Houdini's own FBX importer)
-node = hou.pwd()
-resx = node.evalParm('resx')
-resy = node.evalParm('resy')
-aspect = node.evalParm('aspect')
-aperture *= min(1, (resx / resy * aspect) / 1.5)
-return aperture
-"""
-        parm.setExpression(expression, language=hou.exprLanguage.Python)
-
     def switch(self, container, representation):
         self.update(container, representation)

openpype/hosts/houdini/plugins/publish/collect_cache_farm.py (new file, 75 lines)
@@ -0,0 +1,75 @@
+import os
+import pyblish.api
+import hou
+from openpype.hosts.houdini.api import lib
+
+
+class CollectDataforCache(pyblish.api.InstancePlugin):
+    """Collect data for caching to Deadline."""
+
+    order = pyblish.api.CollectorOrder + 0.04
+    families = ["ass", "pointcache",
+                "mantraifd", "redshiftproxy",
+                "vdbcache"]
+    hosts = ["houdini"]
+    targets = ["local", "remote"]
+    label = "Collect Data for Cache"
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+        farm_enabled = creator_attribute["farm"]
+        instance.data["farm"] = farm_enabled
+        if not farm_enabled:
+            self.log.debug("Caching on farm is disabled. "
+                           "Skipping farm collecting.")
+            return
+        # Why do we need this particular collector to collect the expected
+        # output files from a ROP node. Don't we have a dedicated collector
+        # for that yet?
+        # Collect expected files
+        ropnode = hou.node(instance.data["instance_node"])
+        output_parm = lib.get_output_parameter(ropnode)
+        expected_filepath = output_parm.eval()
+        instance.data.setdefault("files", list())
+        instance.data.setdefault("expectedFiles", list())
+        if instance.data.get("frames"):
+            files = self.get_files(instance, expected_filepath)
+            # list of files
+            instance.data["files"].extend(files)
+        else:
+            # single file
+            instance.data["files"].append(output_parm.eval())
+        cache_files = {"_": instance.data["files"]}
+        # Convert instance family to pointcache if it is bgeo or abc
+        # because ???
+        for family in instance.data["families"]:
+            if family in ("bgeo", "abc"):
+                instance.data["family"] = "pointcache"
+                break
+        instance.data.update({
+            "plugin": "Houdini",
+            "publish": True
+        })
+        instance.data["families"].append("publish.hou")
+        instance.data["expectedFiles"].append(cache_files)
+
+        self.log.debug("{}".format(instance.data))
+
+    def get_files(self, instance, output_parm):
+        """Get the files with the frame range data
+
+        Args:
+            instance (pyblish.api.Instance): instance
+            output_parm (str): path evaluated from the output parameter
+
+        Returns:
+            files: a list of files
+        """
+        directory = os.path.dirname(output_parm)
+
+        files = [
+            os.path.join(directory, frame).replace("\\", "/")
+            for frame in instance.data["frames"]
+        ]
+
+        return files

openpype/hosts/houdini/plugins/publish/collect_chunk_size.py (new file, 39 lines)
@@ -0,0 +1,39 @@
+import pyblish.api
+from openpype.lib import NumberDef
+from openpype.pipeline import OpenPypePyblishPluginMixin
+
+
+class CollectChunkSize(pyblish.api.InstancePlugin,
+                       OpenPypePyblishPluginMixin):
+    """Collect chunk size for cache submission to Deadline."""
+
+    order = pyblish.api.CollectorOrder + 0.05
+    families = ["ass", "pointcache",
+                "vdbcache", "mantraifd",
+                "redshiftproxy"]
+    hosts = ["houdini"]
+    targets = ["local", "remote"]
+    label = "Collect Chunk Size"
+    chunkSize = 999999
+
+    def process(self, instance):
+        # need to get the chunk size info from the setting
+        attr_values = self.get_attr_values_from_data(instance.data)
+        instance.data["chunkSize"] = attr_values.get("chunkSize")
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"]  # noqa
+        cls.chunkSize = project_setting["chunk_size"]
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            NumberDef("chunkSize",
+                      minimum=1,
+                      maximum=999999,
+                      decimals=0,
+                      default=cls.chunkSize,
+                      label="Frame Per Task")
+        ]

@@ -14,7 +14,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
     order = pyblish.api.CollectorOrder + 0.01
     label = "Collect Frames"
     families = ["vdbcache", "imagesequence", "ass",
-                "redshiftproxy", "review", "bgeo"]
+                "mantraifd", "redshiftproxy", "review",
+                "bgeo"]
 
     def process(self, instance):
 

@@ -14,8 +14,12 @@ class ExtractAlembic(publish.Extractor):
     label = "Extract Alembic"
     hosts = ["houdini"]
     families = ["abc", "camera"]
+    targets = ["local", "remote"]
 
     def process(self, instance):
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
 
         ropnode = hou.node(instance.data["instance_node"])
 

@@ -14,9 +14,12 @@ class ExtractAss(publish.Extractor):
     label = "Extract Ass"
     families = ["ass"]
     hosts = ["houdini"]
+    targets = ["local", "remote"]
 
     def process(self, instance):
 
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
         ropnode = hou.node(instance.data["instance_node"])
 
         # Get the filename from the filename parameter

@@ -17,7 +17,9 @@ class ExtractBGEO(publish.Extractor):
     families = ["bgeo"]
 
     def process(self, instance):
 
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
         ropnode = hou.node(instance.data["instance_node"])
 
         # Get the filename from the filename parameter

openpype/hosts/houdini/plugins/publish/extract_mantra_ifd.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+import os
+
+import pyblish.api
+
+from openpype.pipeline import publish
+
+import hou
+
+
+class ExtractMantraIFD(publish.Extractor):
+
+    order = pyblish.api.ExtractorOrder
+    label = "Extract Mantra ifd"
+    hosts = ["houdini"]
+    families = ["mantraifd"]
+    targets = ["local", "remote"]
+
+    def process(self, instance):
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
+
+        ropnode = hou.node(instance.data.get("instance_node"))
+        output = ropnode.evalParm("soho_diskfile")
+        staging_dir = os.path.dirname(output)
+        instance.data["stagingDir"] = staging_dir
+
+        files = instance.data["frames"]
+        missing_frames = [
+            frame
+            for frame in instance.data["frames"]
+            if not os.path.exists(
+                os.path.normpath(os.path.join(staging_dir, frame)))
+        ]
+        if missing_frames:
+            raise RuntimeError("Failed to complete Mantra ifd extraction. "
+                               "Missing output files: {}".format(
+                                   missing_frames))
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        representation = {
+            'name': 'ifd',
+            'ext': 'ifd',
+            'files': files,
+            "stagingDir": staging_dir,
+            "frameStart": instance.data["frameStart"],
+            "frameEnd": instance.data["frameEnd"],
+        }
+        instance.data["representations"].append(representation)

@@ -14,9 +14,12 @@ class ExtractRedshiftProxy(publish.Extractor):
     label = "Extract Redshift Proxy"
     families = ["redshiftproxy"]
     hosts = ["houdini"]
+    targets = ["local", "remote"]
 
     def process(self, instance):
 
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
         ropnode = hou.node(instance.data.get("instance_node"))
 
         # Get the filename from the filename parameter

@@ -16,7 +16,9 @@ class ExtractVDBCache(publish.Extractor):
     hosts = ["houdini"]
 
     def process(self, instance):
 
+        if instance.data.get("farm"):
+            self.log.debug("Should be processed on farm, skipping.")
+            return
         ropnode = hou.node(instance.data["instance_node"])
 
         # Get the filename from the filename parameter

@@ -22,7 +22,8 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
                 "arnold_rop",
                 "mantra_rop",
                 "karma_rop",
-                "usdrender"]
+                "usdrender",
+                "publish.hou"]
     optional = True
 
     def process(self, context):

@@ -0,0 +1,183 @@
+import os
+import getpass
+from datetime import datetime
+
+import hou
+
+import attr
+import pyblish.api
+from openpype.lib import (
+    TextDef,
+    NumberDef,
+)
+from openpype.pipeline import (
+    legacy_io,
+    OpenPypePyblishPluginMixin
+)
+from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build
+from openpype_modules.deadline import abstract_submit_deadline
+from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+
+
+@attr.s
+class HoudiniPluginInfo(object):
+    Build = attr.ib(default=None)
+    IgnoreInputs = attr.ib(default=True)
+    ScriptJob = attr.ib(default=True)
+    SceneFile = attr.ib(default=None)   # Input
+    SaveFile = attr.ib(default=True)
+    ScriptFilename = attr.ib(default=None)
+    OutputDriver = attr.ib(default=None)
+    Version = attr.ib(default=None)  # Mandatory for Deadline
+    ProjectPath = attr.ib(default=None)
+
+
+class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,  # noqa
+                                 OpenPypePyblishPluginMixin):
+    """Submit Houdini scene to perform a local publish in Deadline.
+
+    Publishing in Deadline can be helpful for scenes that publish very slow.
+    This way it can process in the background on another machine without the
+    Artist having to wait for the publish to finish on their local machine.
+
+    Submission is done through the Deadline Web Service as
+    supplied via the environment variable AVALON_DEADLINE.
+
+    """
+
+    label = "Submit Scene to Deadline"
+    order = pyblish.api.IntegratorOrder
+    hosts = ["houdini"]
+    families = ["publish.hou"]
+    targets = ["local"]
+
+    priority = 50
+    jobInfo = {}
+    pluginInfo = {}
+    group = None
+
+    def get_job_info(self):
+        job_info = DeadlineJobInfo(Plugin="Houdini")
+
+        job_info.update(self.jobInfo)
+        instance = self._instance
+        context = instance.context
+        assert all(
+            result["success"] for result in context.data["results"]
+        ), "Errors found, aborting integration.."
+
+        # Deadline connection
+        AVALON_DEADLINE = legacy_io.Session.get(
+            "AVALON_DEADLINE", "http://localhost:8082"
+        )
+        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
+
+        project_name = instance.context.data["projectName"]
+        filepath = context.data["currentFile"]
+        scenename = os.path.basename(filepath)
+        job_name = "{scene} - {instance} [PUBLISH]".format(
+            scene=scenename, instance=instance.name)
+        batch_name = "{code} - {scene}".format(code=project_name,
+                                               scene=scenename)
+        if is_in_tests():
+            batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
+
+        job_info.Name = job_name
+        job_info.BatchName = batch_name
+        job_info.Plugin = instance.data["plugin"]
+        job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
+        rop_node = self.get_rop_node(instance)
+        if rop_node.type().name() != "alembic":
+            frames = "{start}-{end}x{step}".format(
+                start=int(instance.data["frameStart"]),
+                end=int(instance.data["frameEnd"]),
+                step=int(instance.data["byFrameStep"]),
+            )
+
+            job_info.Frames = frames
+
+        job_info.Pool = instance.data.get("primaryPool")
+        job_info.SecondaryPool = instance.data.get("secondaryPool")
+
+        attr_values = self.get_attr_values_from_data(instance.data)
+
+        job_info.ChunkSize = instance.data["chunkSize"]
+        job_info.Comment = context.data.get("comment")
+        job_info.Priority = attr_values.get("priority", self.priority)
+        job_info.Group = attr_values.get("group", self.group)
+
+        keys = [
+            "FTRACK_API_USER",
+            "FTRACK_API_KEY",
+            "FTRACK_SERVER"
+        ]
+
+        # Add OpenPype version if we are running from build.
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+        # Add mongo url if it's enabled
+        if self._instance.context.data.get("deadlinePassMongoUrl"):
+            keys.append("OPENPYPE_MONGO")
+
+        environment = dict({key: os.environ[key] for key in keys
+                            if key in os.environ}, **legacy_io.Session)
+
+        for key in keys:
+            value = environment.get(key)
+            if not value:
+                continue
+            job_info.EnvironmentKeyValue[key] = value
+        # to recognize render jobs
+        job_info.add_render_job_env_var()
+
+        return job_info
+
+    def get_plugin_info(self):
+        instance = self._instance
+        version = hou.applicationVersionString()
+        version = ".".join(version.split(".")[:2])
+        rop = self.get_rop_node(instance)
+        plugin_info = HoudiniPluginInfo(
+            Build=None,
+            IgnoreInputs=True,
+            ScriptJob=True,
+            SceneFile=self.scene_path,
+            SaveFile=True,
+            OutputDriver=rop.path(),
+            Version=version,
+            ProjectPath=os.path.dirname(self.scene_path)
+        )
+
+        plugin_payload = attr.asdict(plugin_info)
+
+        return plugin_payload
+
+    def process(self, instance):
+        super(HoudiniCacheSubmitDeadline, self).process(instance)
+        output_dir = os.path.dirname(instance.data["files"][0])
+        instance.data["outputDir"] = output_dir
+        instance.data["toBeRenderedOn"] = "deadline"
+
+    def get_rop_node(self, instance):
+        rop = instance.data.get("instance_node")
+        rop_node = hou.node(rop)
+
+        return rop_node
+
+    @classmethod
+    def get_attribute_defs(cls):
+        defs = super(HoudiniCacheSubmitDeadline, cls).get_attribute_defs()
+        defs.extend([
+            NumberDef("priority",
+                      minimum=1,
+                      maximum=250,
+                      decimals=0,
+                      default=cls.priority,
+                      label="Priority"),
+            TextDef("group",
+                    default=cls.group,
+                    label="Group Name"),
+        ])
+
+        return defs

@@ -0,0 +1,501 @@
+# -*- coding: utf-8 -*-
+"""Submit publishing job to farm."""
+import os
+import json
+import re
+from copy import deepcopy
+import requests
+
+import pyblish.api
+
+from openpype import AYON_SERVER_ENABLED
+from openpype.client import (
+    get_last_version_by_subset_name,
+)
+from openpype.pipeline import publish, legacy_io
+from openpype.lib import EnumDef, is_running_from_build
+from openpype.tests.lib import is_in_tests
+from openpype.pipeline.version_start import get_versioning_start
+
+from openpype.pipeline.farm.pyblish_functions import (
+    create_skeleton_instance_cache,
+    create_instances_for_cache,
+    attach_instances_to_subset,
+    prepare_cache_representations,
+    create_metadata_path
+)
+
+
+class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
+                                     publish.OpenPypePyblishPluginMixin,
+                                     publish.ColormanagedPyblishPluginMixin):
+    """Process cache job submitted on farm.
+
+    This is a replicated version of the submit publish job,
+    specifically for cache(s).
+
+    These jobs are dependent on a deadline job
+    submission prior to this plug-in.
+
+    - In case of Deadline, it creates dependent job on farm publishing
+      rendered image sequence.
+
+    Options in instance.data:
+        - deadlineSubmissionJob (dict, Required): The returned .json
+          data from the job submission to deadline.
+
+        - outputDir (str, Required): The output directory where the metadata
+          file should be generated. It's assumed that this will also be
+          final folder containing the output files.
+
+        - ext (str, Optional): The extension (including `.`) that is required
+          in the output filename to be picked up for image sequence
+          publishing.
+
+        - expectedFiles (list or dict): explained below
+
+    """
+
+    label = "Submit cache jobs to Deadline"
+    order = pyblish.api.IntegratorOrder + 0.2
+    icon = "tractor"
+
+    targets = ["local"]
+
+    hosts = ["houdini"]
+
+    families = ["publish.hou"]
+
+    environ_job_filter = [
+        "OPENPYPE_METADATA_FILE"
+    ]
+
+    environ_keys = [
+        "AVALON_APP_NAME",
+        "OPENPYPE_USERNAME",
+        "OPENPYPE_SG_USER",
+    ]
+
+    # custom deadline attributes
+    deadline_department = ""
+    deadline_pool = ""
+    deadline_pool_secondary = ""
+    deadline_group = ""
+    deadline_chunk_size = 1
+    deadline_priority = None
+
+    # regex for finding frame number in string
+    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
+
+    plugin_pype_version = "3.0"
+
+    # script path for publish_filesequence.py
+    publishing_script = None
+
+    def _submit_deadline_post_job(self, instance, job):
+        """Submit publish job to Deadline.
+
+        Deadline specific code separated from :meth:`process` for sake of
+        more universal code. Muster post job is sent directly by Muster
+        submitter, so this type of code isn't necessary for it.
+
+        Returns:
+            (str): deadline_publish_job_id
+        """
+        data = instance.data.copy()
+        subset = data["subset"]
+        job_name = "Publish - {subset}".format(subset=subset)
+
+        anatomy = instance.context.data['anatomy']
+
+        # instance.data.get("subset") != instances[0]["subset"]
+        # 'Main' vs 'renderMain'
+        override_version = None
+        instance_version = instance.data.get("version")  # take this if exists
+        if instance_version != 1:
+            override_version = instance_version
+
+        output_dir = self._get_publish_folder(
+            anatomy,
+            deepcopy(instance.data["anatomyData"]),
+            instance.data.get("asset"),
+            instance.data["subset"],
+            instance.context,
+            instance.data["family"],
+            override_version
+        )
+
+        # Transfer the environment from the original job to this dependent
+        # job so they use the same environment
+        metadata_path, rootless_metadata_path = \
+            create_metadata_path(instance, anatomy)
+
+        environment = {
+            "AVALON_PROJECT": instance.context.data["projectName"],
+            "AVALON_ASSET": instance.context.data["asset"],
+            "AVALON_TASK": instance.context.data["task"],
+            "OPENPYPE_USERNAME": instance.context.data["user"],
+            "OPENPYPE_LOG_NO_COLORS": "1",
+            "IS_TEST": str(int(is_in_tests()))
+        }
+
+        if AYON_SERVER_ENABLED:
+            environment["AYON_PUBLISH_JOB"] = "1"
+            environment["AYON_RENDER_JOB"] = "0"
+            environment["AYON_REMOTE_PUBLISH"] = "0"
+            environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]
+            deadline_plugin = "Ayon"
+        else:
+            environment["OPENPYPE_PUBLISH_JOB"] = "1"
+            environment["OPENPYPE_RENDER_JOB"] = "0"
+            environment["OPENPYPE_REMOTE_PUBLISH"] = "0"
+            deadline_plugin = "OpenPype"
+            # Add OpenPype version if we are running from build.
+            if is_running_from_build():
+                self.environ_keys.append("OPENPYPE_VERSION")
+
+        # add environments from self.environ_keys
+        for env_key in self.environ_keys:
+            if os.getenv(env_key):
+                environment[env_key] = os.environ[env_key]
+
+        # pass environment keys from self.environ_job_filter
+        job_environ = job["Props"].get("Env", {})
+        for env_j_key in self.environ_job_filter:
+            if job_environ.get(env_j_key):
+                environment[env_j_key] = job_environ[env_j_key]
+
+        # Add mongo url if it's enabled
+        if instance.context.data.get("deadlinePassMongoUrl"):
+            mongo_url = os.environ.get("OPENPYPE_MONGO")
+            if mongo_url:
+                environment["OPENPYPE_MONGO"] = mongo_url
+
+        priority = self.deadline_priority or instance.data.get("priority", 50)
+
+        instance_settings = self.get_attr_values_from_data(instance.data)
+        initial_status = instance_settings.get("publishJobState", "Active")
+        # TODO: Remove this backwards compatibility of `suspend_publish`
+        if instance.data.get("suspend_publish"):
+            initial_status = "Suspended"
+
+        args = [
+            "--headless",
+            'publish',
+            '"{}"'.format(rootless_metadata_path),
+            "--targets", "deadline",
+            "--targets", "farm"
+        ]
+
+        if is_in_tests():
+            args.append("--automatic-tests")
+
+        # Generate the payload for Deadline submission
+        secondary_pool = (
+            self.deadline_pool_secondary or instance.data.get("secondaryPool")
+        )
+        payload = {
+            "JobInfo": {
+                "Plugin": deadline_plugin,
+                "BatchName": job["Props"]["Batch"],
+                "Name": job_name,
+                "UserName": job["Props"]["User"],
+                "Comment": instance.context.data.get("comment", ""),
+
+                "Department": self.deadline_department,
+                "ChunkSize": self.deadline_chunk_size,
+                "Priority": priority,
+                "InitialStatus": initial_status,
+
+                "Group": self.deadline_group,
+                "Pool": self.deadline_pool or instance.data.get("primaryPool"),
+                "SecondaryPool": secondary_pool,
+                # ensure the outputdirectory with correct slashes
+                "OutputDirectory0": output_dir.replace("\\", "/")
+            },
+            "PluginInfo": {
+                "Version": self.plugin_pype_version,
+                "Arguments": " ".join(args),
+                "SingleFrameOnly": "True",
+            },
+            # Mandatory for Deadline, may be empty
+            "AuxFiles": [],
+        }
+
+        if job.get("_id"):
+            payload["JobInfo"]["JobDependency0"] = job["_id"]
+
+        for index, (key_, value_) in enumerate(environment.items()):
+            payload["JobInfo"].update(
+                {
+                    "EnvironmentKeyValue%d"
+                    % index: "{key}={value}".format(
+                        key=key_, value=value_
+                    )
+                }
+            )
+        # remove secondary pool
+        payload["JobInfo"].pop("SecondaryPool", None)
+
+        self.log.debug("Submitting Deadline publish job ...")
+
+        url = "{}/api/jobs".format(self.deadline_url)
+        response = requests.post(url, json=payload, timeout=10)
+        if not response.ok:
+            raise Exception(response.text)
+
+        deadline_publish_job_id = response.json()["_id"]
+
+        return deadline_publish_job_id
+
+    def process(self, instance):
+        # type: (pyblish.api.Instance) -> None
+        """Process plugin.
+
+        Detect type of render farm submission and create and post dependent
+        job in case of Deadline. It creates json file with metadata needed for
+        publishing in directory of render.
+
+        Args:
+            instance (pyblish.api.Instance): Instance data.
+
+        """
+        if not instance.data.get("farm"):
+            self.log.debug("Skipping local instance.")
+            return
+
+        anatomy = instance.context.data["anatomy"]
+
+        instance_skeleton_data = create_skeleton_instance_cache(instance)
+        """
+        if content of `expectedFiles` list are dictionaries, we will handle
+        it as list of AOVs, creating instance for every one of them.
+
+        Example:
+        --------
+
+        expectedFiles = [
+            {
+                "beauty": [
+                    "foo_v01.0001.exr",
+                    "foo_v01.0002.exr"
+                ],
+
+                "Z": [
+                    "boo_v01.0001.exr",
+                    "boo_v01.0002.exr"
+                ]
+            }
+        ]
+
+        This will create instances for `beauty` and `Z` subset
+        adding those files to their respective representations.
+
+        If we have only list of files, we collect all file sequences.
+        More than one doesn't probably make sense, but we'll handle it
+        like creating one instance with multiple representations.
+
+        Example:
+        --------
+
+        expectedFiles = [
+            "foo_v01.0001.exr",
+            "foo_v01.0002.exr",
+            "xxx_v01.0001.exr",
+            "xxx_v01.0002.exr"
+        ]
+
+        This will result in one instance with two representations:
+        `foo` and `xxx`
+        """
+
+        if isinstance(instance.data.get("expectedFiles")[0], dict):
+            instances = create_instances_for_cache(
+                instance, instance_skeleton_data)
+        else:
+            representations = prepare_cache_representations(
+                instance_skeleton_data,
+                instance.data.get("expectedFiles"),
+                anatomy
+            )
+
+            if "representations" not in instance_skeleton_data.keys():
+                instance_skeleton_data["representations"] = []
+
+            # add representation
+            instance_skeleton_data["representations"] += representations
+            instances = [instance_skeleton_data]
+
+        # attach instances to subset
+        if instance.data.get("attachTo"):
+            instances = attach_instances_to_subset(
+                instance.data.get("attachTo"), instances
+            )
+
+        r''' SUBMiT PUBLiSH JOB 2 D34DLiN3
+          ____
+        '     '            .---.  .---. .--. .---. .--..--..--..--. .---.
+        |     |   --= \   |  .  \/   _|/    \|  .  \ ||  ||  \|  |  | .-'
+        | JOB |   --= /   |  |  ||  __|  ..  |  |  | |;_ |  |\ |  |  | '-.
+        |     |           |____./ \.__|._||_.|___./|_____|||__|\__|\.___|
+        ._____.
+
+        '''
+
+        render_job = None
+        submission_type = ""
+        if instance.data.get("toBeRenderedOn") == "deadline":
+            render_job = instance.data.pop("deadlineSubmissionJob", None)
+            submission_type = "deadline"
+
+        if not render_job:
+            import getpass
+
+            render_job = {}
+            self.log.debug("Faking job data ...")
+            render_job["Props"] = {}
+            # Render job doesn't exist because we do not have prior submission.
+            # We still use data from it so lets fake it.
+            #
+            # Batch name reflect original scene name
+
+            if instance.data.get("assemblySubmissionJobs"):
+                render_job["Props"]["Batch"] = instance.data.get(
+                    "jobBatchName")
+            else:
+                batch = os.path.splitext(os.path.basename(
+                    instance.context.data.get("currentFile")))[0]
+                render_job["Props"]["Batch"] = batch
+            # User is deadline user
+            render_job["Props"]["User"] = instance.context.data.get(
+                "deadlineUser", getpass.getuser())
+
+        deadline_publish_job_id = None
+        if submission_type == "deadline":
+            # get default deadline webservice url from deadline module
+            self.deadline_url = instance.context.data["defaultDeadline"]
+            # if custom one is set in instance, use that
+            if instance.data.get("deadlineUrl"):
+                self.deadline_url = instance.data.get("deadlineUrl")
+            assert self.deadline_url, "Requires Deadline Webservice URL"
+
+            deadline_publish_job_id = \
+                self._submit_deadline_post_job(instance, render_job)
+
+            # Inject deadline url to instances.
+            for inst in instances:
+                inst["deadlineUrl"] = self.deadline_url
+
+        # publish job file
+        publish_job = {
+            "asset": instance_skeleton_data["asset"],
+            "frameStart": instance_skeleton_data["frameStart"],
+            "frameEnd": instance_skeleton_data["frameEnd"],
+            "fps": instance_skeleton_data["fps"],
+            "source": instance_skeleton_data["source"],
+            "user": instance.context.data["user"],
+            "version": instance.context.data["version"],  # workfile version
+            "intent": instance.context.data.get("intent"),
+            "comment": instance.context.data.get("comment"),
+            "job": render_job or None,
+            "session": legacy_io.Session.copy(),
+            "instances": instances
+        }
+
+        if deadline_publish_job_id:
+            publish_job["deadline_publish_job_id"] = deadline_publish_job_id
+
+        metadata_path, rootless_metadata_path = \
+            create_metadata_path(instance, anatomy)
+
+        with open(metadata_path, "w") as f:
+            json.dump(publish_job, f, indent=4, sort_keys=True)
+
+    def _get_publish_folder(self, anatomy, template_data,
+                            asset, subset, context,
+                            family, version=None):
+        """
+        Extracted logic to pre-calculate real publish folder, which is
+        calculated in IntegrateNew inside of Deadline process.
+        This should match logic in:
+            'collect_anatomy_instance_data' - to
+                get correct anatomy, family, version for subset and
+            'collect_resources_path'
+                get publish_path
+
+        Args:
+            anatomy (openpype.pipeline.anatomy.Anatomy):
+            template_data (dict): pre-calculated collected data for process
+            asset (string): asset name
+            subset (string): subset name (actually group name of subset)
+            family (string): for current deadline process it's always 'render'
+                TODO - for generic use family needs to be dynamically
+                    calculated like IntegrateNew does
+            version (int): override version from instance if exists
+
+        Returns:
+            (string): publish folder where rendered and published files will
+                be stored
+                based on 'publish' template
+        """
+
+        project_name = context.data["projectName"]
+        if not version:
+            version = get_last_version_by_subset_name(
+                project_name,
+                subset,
+                asset_name=asset
+            )
+            if version:
+                version = int(version["name"]) + 1
+            else:
+                version = get_versioning_start(
+                    project_name,
+                    template_data["app"],
+                    task_name=template_data["task"]["name"],
+                    task_type=template_data["task"]["type"],
+                    family="render",
+                    subset=subset,
+                    project_settings=context.data["project_settings"]
+                )
+
+        host_name = context.data["hostName"]
+        task_info = template_data.get("task") or {}
+
+        template_name = publish.get_publish_template_name(
+            project_name,
+            host_name,
+            family,
+            task_info.get("name"),
+            task_info.get("type"),
+        )
+
+        template_data["subset"] = subset
+        template_data["family"] = family
+        template_data["version"] = version
+
+        render_templates = anatomy.templates_obj[template_name]
+        if "folder" in render_templates:
+            publish_folder = render_templates["folder"].format_strict(
+                template_data
+            )
+        else:
+            # solve deprecated situation when `folder` key is not underneath
+            # `publish` anatomy
+            self.log.warning((
+                "Deprecation warning: Anatomy does not have set `folder`"
+                " key underneath `publish` (in global of for project `{}`)."
+            ).format(project_name))
+
+            file_path = render_templates["path"].format_strict(template_data)
+            publish_folder = os.path.dirname(file_path)
+
+        return publish_folder
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            EnumDef("publishJobState",
+                    label="Publish Job State",
+                    items=["Active", "Suspended"],
+                    default="Active")
+        ]

@@ -22,7 +22,8 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
                 "render.frames_farm",
                 "renderFarm",
                 "renderlayer",
-                "maxrender"]
+                "maxrender",
+                "publish.hou"]
     optional = True
 
     # cache

@@ -744,6 +744,238 @@ def get_resources(project_name, version, extension=None):
     return resources
 
 
+def create_skeleton_instance_cache(instance):
+    # type: (pyblish.api.Instance) -> dict
+    """Create skeleton instance from original instance data.
+
+    This will create dictionary containing skeleton
+    - common - data used for publishing rendered instances.
+    This skeleton instance is then extended with additional data
+    and serialized to be processed by farm job.
+
+    Args:
+        instance (pyblish.api.Instance): Original instance to
+            be used as a source of data.
+
+    Returns:
+        dict: Dictionary with skeleton instance data.
+
+    """
+    # list of family names to transfer to new family if present
+
+    context = instance.context
+    data = instance.data.copy()
+    anatomy = instance.context.data["anatomy"]  # type: Anatomy
+
+    # get time related data from instance (or context)
+    time_data = get_time_data_from_instance_or_context(instance)
+
+    if data.get("extendFrames", False):
+        time_data.start, time_data.end = extend_frames(
+            data["asset"],
+            data["subset"],
+            time_data.start,
+            time_data.end,
+        )
+
+    source = data.get("source") or context.data.get("currentFile")
+    success, rootless_path = (
+        anatomy.find_root_template_from_path(source)
+    )
+    if success:
+        source = rootless_path
+    else:
+        # `rootless_path` is not set to `source` if none of roots match
+        log = Logger.get_logger("farm_publishing")
+        log.warning(("Could not find root path for remapping \"{}\". "
+                     "This may cause issues.").format(source))
+
+    family = instance.data["family"]
+    # Make sure "render" is in the families to go through
+    # validating expected and rendered files
+    # during publishing job.
+    families = ["render", family]
+
+    instance_skeleton_data = {
+        "family": family,
+        "subset": data["subset"],
+        "families": families,
+        "asset": data["asset"],
+        "frameStart": time_data.start,
+        "frameEnd": time_data.end,
+        "handleStart": time_data.handle_start,
+        "handleEnd": time_data.handle_end,
+        "frameStartHandle": time_data.start - time_data.handle_start,
+        "frameEndHandle": time_data.end + time_data.handle_end,
+        "comment": data.get("comment"),
+        "fps": time_data.fps,
+        "source": source,
+        "extendFrames": data.get("extendFrames"),
+        "overrideExistingFrame": data.get("overrideExistingFrame"),
+        "jobBatchName": data.get("jobBatchName", ""),
+        # map inputVersions `ObjectId` -> `str` so json supports it
+        "inputVersions": list(map(str, data.get("inputVersions", []))),
+    }
+
+    # skip locking version if we are creating v01
+    instance_version = data.get("version")  # take this if exists
+    if instance_version != 1:
+        instance_skeleton_data["version"] = instance_version
+
+    representations = get_transferable_representations(instance)
+    instance_skeleton_data["representations"] = representations
+
+    persistent = instance.data.get("stagingDir_persistent") is True
+    instance_skeleton_data["stagingDir_persistent"] = persistent
+
+    return instance_skeleton_data
+
+
+def prepare_cache_representations(skeleton_data, exp_files, anatomy):
+    """Create representations for file sequences.
+
+    This will return representations of expected files if they are not
+    in hierarchy of aovs. There should be only one sequence of files for
+    most cases, but if not - we create representation from each of them.
+
+    Arguments:
+        skeleton_data (dict): instance data for which we are
+                        setting representations
+        exp_files (list): list of expected files
+        anatomy (Anatomy)
+    Returns:
+        list of representations
+
+    """
+    representations = []
+    collections, remainders = clique.assemble(exp_files)
+
+    log = Logger.get_logger("farm_publishing")
+
+    # create representation for every collected sequence
+    for collection in collections:
+        ext = collection.tail.lstrip(".")
+
+        staging = os.path.dirname(list(collection)[0])
+        success, rootless_staging_dir = (
+            anatomy.find_root_template_from_path(staging)
+        )
+        if success:
+            staging = rootless_staging_dir
+        else:
+            log.warning((
+                "Could not find root path for remapping \"{}\"."
+                " This may cause issues on farm."
+            ).format(staging))
+
+        frame_start = int(skeleton_data.get("frameStartHandle"))
+        rep = {
+            "name": ext,
+            "ext": ext,
+            "files": [os.path.basename(f) for f in list(collection)],
+            "frameStart": frame_start,
+            "frameEnd": int(skeleton_data.get("frameEndHandle")),
+            # If expectedFile are absolute, we need only filenames
+            "stagingDir": staging,
+            "fps": skeleton_data.get("fps")
+        }
+
+        representations.append(rep)
+
+    return representations
+
+
+def create_instances_for_cache(instance, skeleton):
+    """Create instance for cache.
+
+    This will create new instance for every AOV it can detect in expected
+    files list.
+
+    Args:
+        instance (pyblish.api.Instance): Original instance.
+        skeleton (dict): Skeleton data for instance (those needed) later
+            by collector.
+
+    Returns:
+        list of instances
+
+    Throws:
+        ValueError: when expected files do not resolve to a single
+            usable sequence or file.
+
+    """
+    anatomy = instance.context.data["anatomy"]
+    subset = skeleton["subset"]
+    family = skeleton["family"]
+    exp_files = instance.data["expectedFiles"]
+    log = Logger.get_logger("farm_publishing")
+
+    instances = []
+    # go through AOVs in expected files
+    for _, files in exp_files[0].items():
+        cols, rem = clique.assemble(files)
+        # we shouldn't have any remainders. And if we do, it should
+        # be just one item for single frame renders.
+        if not cols and rem:
+            if len(rem) != 1:
+                raise ValueError("Found multiple non related files "
+                                 "to render, don't know what to do "
+                                 "with them.")
+            col = rem[0]
+            ext = os.path.splitext(col)[1].lstrip(".")
+        else:
+            # but we really expect only one collection.
+            # Nothing else makes sense.
+            if len(cols) != 1:
+                raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+            ext = cols[0].tail.lstrip(".")
+            col = list(cols[0])
+
+        if isinstance(col, (list, tuple)):
+            staging = os.path.dirname(col[0])
+        else:
+            staging = os.path.dirname(col)
+
+        try:
+            staging = remap_source(staging, anatomy)
+        except ValueError as e:
+            log.warning(e)
+
+        new_instance = deepcopy(skeleton)
+
+        new_instance["subset"] = subset
+        log.info("Creating data for: {}".format(subset))
+        new_instance["family"] = family
+        new_instance["families"] = skeleton["families"]
+        # create representation
+        if isinstance(col, (list, tuple)):
+            files = [os.path.basename(f) for f in col]
+        else:
+            files = os.path.basename(col)
+
+        rep = {
+            "name": ext,
+            "ext": ext,
+            "files": files,
+            "frameStart": int(skeleton["frameStartHandle"]),
+            "frameEnd": int(skeleton["frameEndHandle"]),
+            # If expectedFile are absolute, we need only filenames
+            "stagingDir": staging,
+            "fps": new_instance.get("fps"),
+            "tags": [],
+        }
+
+        new_instance["representations"] = [rep]
+
+        # if extending frames from existing version, copy files from there
+        # into our destination directory
+        if new_instance.get("extendFrames", False):
+            copy_extend_frames(new_instance, rep)
+        instances.append(new_instance)
+
+    log.debug("instances:{}".format(instances))
+    return instances
+
+
 def copy_extend_frames(instance, representation):
     """Copy existing frames from latest version.
 

@@ -99,6 +99,14 @@
         "deadline_chunk_size": 10,
         "deadline_job_delay": "00:00:00:00"
     },
+    "ProcessSubmittedCacheJobOnFarm": {
+        "enabled": true,
+        "deadline_department": "",
+        "deadline_pool": "",
+        "deadline_group": "",
+        "deadline_chunk_size": 1,
+        "deadline_priority": 50
+    },
     "ProcessSubmittedJobOnFarm": {
         "enabled": true,
         "deadline_department": "",

@@ -81,6 +81,11 @@
         }
     },
     "publish": {
+        "CollectChunkSize": {
+            "enabled": true,
+            "optional": true,
+            "chunk_size": 999999
+        },
         "ValidateWorkfilePaths": {
             "enabled": true,
             "optional": true,

@@ -4,6 +4,31 @@
         "key": "publish",
         "label": "Publish plugins",
         "children": [
+            {
+                "type": "dict",
+                "collapsible": true,
+                "checkbox_key": "enabled",
+                "key": "CollectChunkSize",
+                "label": "Collect Chunk Size",
+                "is_group": true,
+                "children": [
+                    {
+                        "type": "boolean",
+                        "key": "enabled",
+                        "label": "Enabled"
+                    },
+                    {
+                        "type": "boolean",
+                        "key": "optional",
+                        "label": "Optional"
+                    },
+                    {
+                        "type": "number",
+                        "key": "chunk_size",
+                        "label": "Frames Per Task"
+                    }
+                ]
+            },
             {
                 "type": "dict",
                 "collapsible": true,

@@ -217,6 +217,17 @@ class AOVFilterSubmodel(BaseSettingsModel):
     )
 
 
+class ProcessCacheJobFarmModel(BaseSettingsModel):
+    """Process submitted cache job on farm."""
+
+    enabled: bool = Field(title="Enabled")
+    deadline_department: str = Field(title="Department")
+    deadline_pool: str = Field(title="Pool")
+    deadline_group: str = Field(title="Group")
+    deadline_chunk_size: int = Field(title="Chunk Size")
+    deadline_priority: int = Field(title="Priority")
+
+
 class ProcessSubmittedJobOnFarmModel(BaseSettingsModel):
     """Process submitted job on farm."""
 

@@ -278,6 +289,9 @@ class PublishPluginsModel(BaseSettingsModel):
         default_factory=CelactionSubmitDeadlineModel,
         title="Celaction Submit Deadline"
     )
+    ProcessSubmittedCacheJobOnFarm: ProcessCacheJobFarmModel = Field(
+        default_factory=ProcessCacheJobFarmModel,
+        title="Process submitted cache Job on farm.")
     ProcessSubmittedJobOnFarm: ProcessSubmittedJobOnFarmModel = Field(
         default_factory=ProcessSubmittedJobOnFarmModel,
         title="Process submitted job on farm.")

@@ -384,6 +398,14 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
         "deadline_chunk_size": 10,
         "deadline_job_delay": "00:00:00:00"
    },
+    "ProcessSubmittedCacheJobOnFarm": {
+        "enabled": True,
+        "deadline_department": "",
+        "deadline_pool": "",
+        "deadline_group": "",
+        "deadline_chunk_size": 1,
+        "deadline_priority": 50
+    },
     "ProcessSubmittedJobOnFarm": {
         "enabled": True,
         "deadline_department": "",

@@ -1 +1 @@
-__version__ = "0.1.1"
+__version__ = "0.1.2"

@@ -107,6 +107,12 @@ DEFAULT_HOUDINI_CREATE_SETTINGS = {
 
 
 # Publish Plugins
+class CollectChunkSizeModel(BaseSettingsModel):
+    enabled: bool = Field(title="Enabled")
+    optional: bool = Field(title="Optional")
+    chunk_size: int = Field(title="Frame Per Task")
+
+
 class ValidateWorkfilePathsModel(BaseSettingsModel):
     enabled: bool = Field(title="Enabled")
     optional: bool = Field(title="Optional")

@@ -127,6 +133,10 @@ class BasicValidateModel(BaseSettingsModel):
 
 
 class PublishPluginsModel(BaseSettingsModel):
+    CollectChunkSize: CollectChunkSizeModel = Field(
+        default_factory=CollectChunkSizeModel,
+        title="Collect Chunk Size"
+    )
     ValidateWorkfilePaths: ValidateWorkfilePathsModel = Field(
         default_factory=ValidateWorkfilePathsModel,
         title="Validate workfile paths settings.")

@@ -139,6 +149,11 @@ class PublishPluginsModel(BaseSettingsModel):
 
 
 DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
+    "CollectChunkSize": {
+        "enabled": True,
+        "optional": True,
+        "chunk_size": 999999
+    },
     "ValidateWorkfilePaths": {
         "enabled": True,
         "optional": True,

@@ -83,6 +83,30 @@ select your render camera.
 All the render outputs are stored in the pyblish/render directory within your project path.\
 For Karma-specific render, it also outputs the USD render as default.
 
+## Publishing cache to Deadline
+Artists can publish caches to Deadline, which increases productivity because the local machine
+stays free for other tasks.
+Caching on the farm is supported for:
+
+**Arnold ASS (.ass)**
+**Pointcache (.bgeo and .abc)**
+**VDB (.vdb)**
+**Redshift Proxy (.rs)**
+
+To submit your cache to Deadline, create the instance(s) with **Submitting to Farm** enabled;
+you can also enable **Use selection** to choose which objects to cache on the farm.
+
+
+In the Publish tab, click the instance(s) to set your preferred
+**Frame per task**.
+
+
+Once you hit **Publish**, the cache is submitted and processed on Deadline.
+When the job finishes, all the caches are located in your publish folder.
+You can load them through the Loader.
+
+
 ## USD (experimental support)
 ### Publishing USD
 You can publish your Solaris Stage as USD file.

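The documentation change above maps onto the collector this PR adds in collect_cache_farm.py: the **Submitting to Farm** toggle is stored as the `farm` creator attribute, copied onto the publish instance, and used to route the instance to the `publish.hou` Deadline submission plugins. Below is a trimmed sketch of that flow for orientation only; the class name is illustrative (the actual plugin is `CollectDataforCache`), and everything except the farm-flag handling has been omitted.

```python
import pyblish.api


class CollectCacheFarmFlag(pyblish.api.InstancePlugin):
    """Trimmed sketch of the farm-flag handling in CollectDataforCache."""

    order = pyblish.api.CollectorOrder + 0.04
    hosts = ["houdini"]
    families = ["ass", "pointcache", "mantraifd", "redshiftproxy", "vdbcache"]

    def process(self, instance):
        # "Submitting to Farm" is stored as a creator attribute on creation.
        farm_enabled = instance.data["creator_attributes"]["farm"]
        instance.data["farm"] = farm_enabled
        if not farm_enabled:
            self.log.debug("Caching on farm is disabled.")
            return

        # Mark the instance so the Deadline cache submission and the cache
        # publish job plugins (families=["publish.hou"]) pick it up.
        instance.data["families"].append("publish.hou")
```

The expected-file collection and chunk-size handling stay as shown in the full plugins earlier in this diff.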
website/docs/assets/houdini_farm_cache_creator.png (new binary file, 93 KiB)
website/docs/assets/houdini_farm_cache_loader.png (new binary file, 132 KiB)
website/docs/assets/houdini_frame_per_task.png (new binary file, 58 KiB)