Merge branch 'develop' into enhancement/OP-6352_tycache-family

This commit is contained in:
Kayla Man 2023-10-03 14:55:16 +08:00
commit 3b8be809e0
47 changed files with 1987 additions and 183 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.2-nightly.1
- 3.17.1
- 3.17.1-nightly.3
- 3.17.1-nightly.2
@ -134,7 +135,6 @@ body:
- 3.14.10-nightly.8
- 3.14.10-nightly.7
- 3.14.10-nightly.6
- 3.14.10-nightly.5
validations:
required: true
- type: dropdown

View file

@ -38,6 +38,8 @@ from .lib import (
from .capture import capture
from .render_lib import prepare_rendering
__all__ = [
"install",
@ -66,4 +68,5 @@ __all__ = [
"get_selection",
"capture",
# "unique_name",
"prepare_rendering",
]

View file

@ -0,0 +1,51 @@
import attr
import bpy
@attr.s
class LayerMetadata(object):
    """Data class for Render Layer metadata."""
    frameStart = attr.ib()  # first frame of the scene range (int)
    frameEnd = attr.ib()  # last frame of the scene range (int)
@attr.s
class RenderProduct(object):
    """
    Getting Colorspace as Specific Render Product Parameter for submitting
    publish job.
    """
    colorspace = attr.ib()  # colorspace
    view = attr.ib()  # OCIO view transform
    productName = attr.ib(default=None)  # optional product name
class ARenderProduct(object):
    """Holds the render products and frame range of the current scene.

    ``get_render_products`` is meant to be overridden by a renderer
    specific class; this base implementation returns a single hardcoded
    product.
    """

    def __init__(self):
        """Constructor."""
        # Initialize
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

    def _get_layer_data(self):
        # Frame range is read from the active Blender scene.
        scene = bpy.context.scene
        return LayerMetadata(
            frameStart=int(scene.frame_start),
            frameEnd=int(scene.frame_end),
        )

    def get_render_products(self):
        """To be implemented by renderer class.

        This should return a list of RenderProducts.

        Returns:
            list: List of RenderProduct
        """
        # NOTE(review): colorspace and view are hardcoded here since OCIO
        # is not currently implemented for Blender.
        return [
            RenderProduct(
                colorspace="sRGB",
                view="ACES 1.0",
                productName=""
            )
        ]

View file

@ -16,6 +16,7 @@ import bpy
import bpy.utils.previews
from openpype import style
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import get_current_asset_name, get_current_task_name
from openpype.tools.utils import host_tools
@ -331,10 +332,11 @@ class LaunchWorkFiles(LaunchQtApp):
def execute(self, context):
result = super().execute(context)
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
if not AYON_SERVER_ENABLED:
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
return result
def before_window_show(self):

View file

@ -0,0 +1,255 @@
import os
import bpy
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name
def get_default_render_folder(settings):
    """Get default render folder from blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["default_render_image_folder"]
def get_aov_separator(settings):
    """Get the AOV separator character from blender settings.

    Args:
        settings (dict): Project settings.

    Returns:
        str: The separator character ("-", "_" or ".").

    Raises:
        ValueError: If the configured separator name is not recognized.
    """
    aov_sep = (settings["blender"]
               ["RenderSettings"]
               ["aov_separator"])

    # Map the settings enum names to the actual separator characters.
    separators = {
        "dash": "-",
        "underscore": "_",
        "dot": ".",
    }
    if aov_sep not in separators:
        raise ValueError(f"Invalid aov separator: {aov_sep}")
    return separators[aov_sep]
def get_image_format(settings):
    """Get image format from blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["image_format"]
def get_multilayer(settings):
    """Get multilayer from blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["multilayer_exr"]
def get_render_product(output_path, name, aov_sep):
    """
    Generate the path to the beauty render product. Blender interprets
    the `#` as the frame number, when it renders.

    Args:
        output_path (str): The root folder for the rendered images.
        name (str): The name of the render instance.
        aov_sep (str): Separator placed between the name and "beauty".

    Returns:
        str: Forward-slash path ending in ``<name><aov_sep>beauty.####``.
    """
    filepath = os.path.join(output_path, name)
    render_product = f"{filepath}{aov_sep}beauty.####"
    # Normalize to forward slashes so the path is consistent across OSes.
    render_product = render_product.replace("\\", "/")

    return render_product
def set_render_format(ext, multilayer):
    """Configure the scene output image format from the extension.

    Args:
        ext (str): Image format extension from project settings.
        multilayer (bool): Whether to write a multilayer EXR (only
            relevant when ``ext`` is "exr").
    """
    # Set Blender to save the file with the right extension
    bpy.context.scene.render.use_file_extension = True

    image_settings = bpy.context.scene.render.image_settings

    # Map the settings extension to Blender's file_format enum value.
    file_formats = {
        "exr": "OPEN_EXR_MULTILAYER" if multilayer else "OPEN_EXR",
        "bmp": "BMP",
        "rgb": "IRIS",
        "png": "PNG",
        "jpeg": "JPEG",
        "jp2": "JPEG2000",
        "tga": "TARGA",
        "tif": "TIFF",
    }
    file_format = file_formats.get(ext)
    # Unknown extensions leave the current format untouched, matching the
    # original if/elif chain which had no else branch.
    if file_format is not None:
        image_settings.file_format = file_format
def set_render_passes(settings):
    """Enable the view layer render passes configured in project settings.

    Args:
        settings (dict): Project settings.

    Returns:
        tuple: ``(aov_list, custom_passes)`` exactly as read from the
            settings, so callers can imprint them on the instance.
    """
    aov_list = (settings["blender"]
                ["RenderSettings"]
                ["aov_list"])

    custom_passes = (settings["blender"]
                     ["RenderSettings"]
                     ["custom_passes"])

    vl = bpy.context.view_layer

    # Each built-in pass is enabled only when its key is in the AOV list.
    vl.use_pass_combined = "combined" in aov_list
    vl.use_pass_z = "z" in aov_list
    vl.use_pass_mist = "mist" in aov_list
    vl.use_pass_normal = "normal" in aov_list
    vl.use_pass_diffuse_direct = "diffuse_light" in aov_list
    vl.use_pass_diffuse_color = "diffuse_color" in aov_list
    vl.use_pass_glossy_direct = "specular_light" in aov_list
    vl.use_pass_glossy_color = "specular_color" in aov_list
    vl.eevee.use_pass_volume_direct = "volume_light" in aov_list
    vl.use_pass_emit = "emission" in aov_list
    vl.use_pass_environment = "environment" in aov_list
    vl.use_pass_shadow = "shadow" in aov_list
    vl.use_pass_ambient_occlusion = "ao" in aov_list

    # Cycles-specific passes live on the view layer's cycles settings.
    cycles = vl.cycles

    cycles.denoising_store_passes = "denoising" in aov_list
    cycles.use_pass_volume_direct = "volume_direct" in aov_list
    cycles.use_pass_volume_indirect = "volume_indirect" in aov_list

    # Create custom AOVs that don't exist yet; reuse existing ones by name.
    aovs_names = [aov.name for aov in vl.aovs]
    for cp in custom_passes:
        cp_name = cp[0]
        if cp_name not in aovs_names:
            aov = vl.aovs.add()
            aov.name = cp_name
        else:
            aov = vl.aovs[cp_name]
        # cp[1] is the per-pass options mapping; type defaults to VALUE.
        aov.type = cp[1].get("type", "VALUE")

    return aov_list, custom_passes
def set_node_tree(output_path, name, aov_sep, ext, multilayer):
    """Build the compositor file-output node for the active render passes.

    Args:
        output_path (str): Folder where the AOV files will be written.
        name (str): Base name for the output files.
        aov_sep (str): Separator between the name and the pass name.
        ext (str): Image format extension from project settings.
        multilayer (bool): Whether a multilayer EXR is rendered.

    Returns:
        list: ``(pass_name, filepath)`` tuples, one per connected AOV.
            Empty for multilayer EXR, where no file output node is used.
    """
    # Set the scene to use the compositor node tree to render
    bpy.context.scene.use_nodes = True

    tree = bpy.context.scene.node_tree

    # Get the Render Layers node
    rl_node = None
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeRLayers":
            rl_node = node
            break

    # If there's not a Render Layers node, we create it
    if not rl_node:
        rl_node = tree.nodes.new("CompositorNodeRLayers")

    # Get the enabled output sockets, that are the active passes for the
    # render.
    # We also exclude some layers.
    exclude_sockets = ["Image", "Alpha", "Noisy Image"]
    passes = [
        socket
        for socket in rl_node.outputs
        if socket.enabled and socket.name not in exclude_sockets
    ]

    # Remove all output nodes
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeOutputFile":
            tree.nodes.remove(node)

    # Create a new output node
    output = tree.nodes.new("CompositorNodeOutputFile")

    # Mirror the scene's render format on the file output node.
    image_settings = bpy.context.scene.render.image_settings
    output.format.file_format = image_settings.file_format

    # In case of a multilayer exr, we don't need to use the output node,
    # because the blender render already outputs a multilayer exr.
    if ext == "exr" and multilayer:
        output.layer_slots.clear()
        return []

    output.file_slots.clear()
    output.base_path = output_path

    aov_file_products = []

    # For each active render pass, we add a new socket to the output node
    # and link it
    for render_pass in passes:
        # `####` is replaced by the frame number when Blender renders.
        filepath = f"{name}{aov_sep}{render_pass.name}.####"

        output.file_slots.new(filepath)

        aov_file_products.append(
            (render_pass.name, os.path.join(output_path, filepath)))

        # The newly added slot is always the last input on the node.
        node_input = output.inputs[-1]

        tree.links.new(render_pass, node_input)

    return aov_file_products
def imprint_render_settings(node, data):
    """Store render settings on ``node`` under the "render_data" key.

    Entries whose value is ``None`` are skipped; existing render data on
    the node is kept and updated.
    """
    render_data_key = "render_data"
    if not node.get(render_data_key):
        node[render_data_key] = {}
    for attr_name, attr_value in data.items():
        # None values are not stored.
        if attr_value is None:
            continue
        node[render_data_key][attr_name] = attr_value
def prepare_rendering(asset_group):
    """Configure the scene render settings for the given render instance.

    Reads the project settings, applies output format, render passes and
    compositor setup, then imprints the resulting render data on
    ``asset_group``.

    Args:
        asset_group: The instance collection to imprint render data on.

    Raises:
        AssertionError: If the current workfile has not been saved yet.
    """
    name = asset_group.name

    filepath = bpy.data.filepath
    assert filepath, "Workfile not saved. Please save the file first."

    file_path = os.path.dirname(filepath)
    file_name = os.path.basename(filepath)
    file_name, _ = os.path.splitext(file_name)

    project = get_current_project_name()
    settings = get_project_settings(project)

    render_folder = get_default_render_folder(settings)
    aov_sep = get_aov_separator(settings)
    ext = get_image_format(settings)
    multilayer = get_multilayer(settings)

    set_render_format(ext, multilayer)
    aov_list, custom_passes = set_render_passes(settings)

    # Renders go next to the workfile: <workdir>/<render_folder>/<workfile>.
    output_path = os.path.join(file_path, render_folder, file_name)

    render_product = get_render_product(output_path, name, aov_sep)
    aov_file_product = set_node_tree(
        output_path, name, aov_sep, ext, multilayer)

    # The beauty product is what Blender itself writes on render.
    bpy.context.scene.render.filepath = render_product

    # Imprinted so the collector can later rebuild the expected files.
    render_settings = {
        "render_folder": render_folder,
        "aov_separator": aov_sep,
        "image_format": ext,
        "multilayer_exr": multilayer,
        "aov_list": aov_list,
        "custom_passes": custom_passes,
        "render_product": render_product,
        "aov_file_product": aov_file_product,
        "review": True,
    }

    imprint_render_settings(asset_group, render_settings)

View file

@ -0,0 +1,53 @@
"""Create render."""
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.render_lib import prepare_rendering
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateRenderlayer(plugin.Creator):
    """Creator for render instances."""

    name = "renderingMain"
    label = "Render"
    family = "render"
    icon = "eye"

    def process(self):
        """Create the render instance collection and set up rendering.

        Returns:
            The created instance collection.
        """
        # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)
            bpy.context.scene.collection.children.link(instances)

        # Create instance object
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = plugin.asset_name(asset, subset)
        asset_group = bpy.data.collections.new(name=name)

        try:
            instances.children.link(asset_group)
            self.data['task'] = get_current_task_name()
            lib.imprint(asset_group, self.data)
            prepare_rendering(asset_group)
        except Exception:
            # Remove the instance if there was an error
            bpy.data.collections.remove(asset_group)
            raise

        # TODO: this is undesiderable, but it's the only way to be sure that
        # the file is saved before the render starts.
        # Blender, by design, doesn't set the file as dirty if modifications
        # happen by script. So, when creating the instance and setting the
        # render settings, the file is not marked as dirty. This means that
        # there is the risk of sending to deadline a file without the right
        # settings. Even the validator to check that the file is saved will
        # detect the file as saved, even if it isn't. The only solution for
        # now it is to force the file to be saved.
        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)

        return asset_group

View file

@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
"""Collect render data."""
import os
import re
import bpy
from openpype.hosts.blender.api import colorspace
import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
    """Gather all publishable render layers from renderSetup."""

    order = pyblish.api.CollectorOrder + 0.01
    hosts = ["blender"]
    families = ["render"]
    label = "Collect Render Layers"
    sync_workfile_version = False

    @staticmethod
    def _expected_file_for_frame(path, file, frame, ext):
        """Return the expected output file path for a single frame.

        Runs of `#` placeholders in *file* are replaced by the frame
        number zero-padded to the placeholder length. Using ``zfill``
        keeps a leading minus sign intact for negative frame numbers
        (``rjust(4, "0")`` would scramble them).
        """
        def _frame_repl(match):
            return str(frame).zfill(len(match.group()))

        filename = re.sub("#+", _frame_repl, file)
        expected_file = f"{os.path.join(path, filename)}.{ext}"
        return expected_file.replace("\\", "/")

    @staticmethod
    def generate_expected_beauty(
        render_product, frame_start, frame_end, frame_step, ext
    ):
        """
        Generate the expected files for the render product for the beauty
        render. This returns a list of files that should be rendered. It
        replaces the sequence of `#` with the frame number.

        Returns:
            dict: ``{"beauty": [expected file paths]}``
        """
        path = os.path.dirname(render_product)
        file = os.path.basename(render_product)

        expected_files = [
            CollectBlenderRender._expected_file_for_frame(
                path, file, frame, ext)
            for frame in range(frame_start, frame_end + 1, frame_step)
        ]

        return {
            "beauty": expected_files
        }

    @staticmethod
    def generate_expected_aovs(
        aov_file_product, frame_start, frame_end, frame_step, ext
    ):
        """
        Generate the expected files for each AOV file product. This
        returns, per AOV name, a list of files that should be rendered,
        replacing the sequence of `#` with the frame number.

        Returns:
            dict: ``{aov_name: [expected file paths]}``
        """
        expected_files = {}

        for aov_name, aov_file in aov_file_product:
            path = os.path.dirname(aov_file)
            file = os.path.basename(aov_file)

            expected_files[aov_name] = [
                CollectBlenderRender._expected_file_for_frame(
                    path, file, frame, ext)
                for frame in range(frame_start, frame_end + 1, frame_step)
            ]

        return expected_files

    def process(self, instance):
        context = instance.context

        # Render data was imprinted on the instance collection by
        # `prepare_rendering` at creation time.
        render_data = bpy.data.collections[str(instance)].get("render_data")

        assert render_data, "No render data found."

        self.log.info(f"render_data: {dict(render_data)}")

        render_product = render_data.get("render_product")
        aov_file_product = render_data.get("aov_file_product")
        ext = render_data.get("image_format")
        multilayer = render_data.get("multilayer_exr")

        frame_start = context.data["frameStart"]
        frame_end = context.data["frameEnd"]
        frame_handle_start = context.data["frameStartHandle"]
        frame_handle_end = context.data["frameEndHandle"]

        expected_beauty = self.generate_expected_beauty(
            render_product, int(frame_start), int(frame_end),
            int(bpy.context.scene.frame_step), ext)

        expected_aovs = self.generate_expected_aovs(
            aov_file_product, int(frame_start), int(frame_end),
            int(bpy.context.scene.frame_step), ext)

        expected_files = expected_beauty | expected_aovs

        instance.data.update({
            "family": "render.farm",
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "frameStartHandle": frame_handle_start,
            "frameEndHandle": frame_handle_end,
            "fps": context.data["fps"],
            "byFrameStep": bpy.context.scene.frame_step,
            "review": render_data.get("review", False),
            "multipartExr": ext == "exr" and multilayer,
            "farm": True,
            "expectedFiles": [expected_files],
            # OCIO not currently implemented in Blender, but the following
            # settings are required by the schema, so it is hardcoded.
            # TODO: Implement OCIO in Blender
            "colorspaceConfig": "",
            "colorspaceDisplay": "sRGB",
            "colorspaceView": "ACES 1.0 SDR-video",
            "renderProducts": colorspace.ARenderProduct(),
        })
        self.log.info(f"data: {instance.data}")

View file

@ -9,7 +9,8 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
label = "Increment Workfile Version"
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
"render"]
def process(self, context):

View file

@ -0,0 +1,47 @@
import os
import bpy
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
from openpype.hosts.blender.api.render_lib import prepare_rendering
class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
                              OptionalPyblishPluginMixin):
    """Validate that the render output path contains the scene name.

    Guards against every submission writing to the same render directory.
    """

    order = ValidateContentsOrder
    families = ["render.farm"]
    hosts = ["blender"]
    label = "Validate Render Output for Deadline"
    optional = True
    actions = [RepairAction]

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        # The workfile name (without extension) must appear in the
        # configured render output path.
        workfile = os.path.basename(bpy.data.filepath)
        workfile_name, _ = os.path.splitext(workfile)
        if workfile_name not in bpy.context.scene.render.filepath:
            raise PublishValidationError(
                "Render output folder "
                "doesn't match the blender scene name! "
                "Use Repair action to "
                "fix the folder file path.."
            )

    @classmethod
    def repair(cls, instance):
        # Rebuild the render settings from the instance collection and
        # persist them by saving the workfile.
        container = bpy.data.collections[str(instance)]
        prepare_rendering(container)
        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
        cls.log.debug("Reset the render output folder...")

View file

@ -0,0 +1,20 @@
import bpy
import pyblish.api
class ValidateFileSaved(pyblish.api.InstancePlugin):
    """Validate that the workfile has been saved."""

    order = pyblish.api.ValidatorOrder - 0.01
    hosts = ["blender"]
    label = "Validate File Saved"
    optional = False
    exclude_families = []

    def process(self, instance):
        family = instance.data["family"]
        # Skip instances whose family matches any excluded family
        # (substring match, mirroring the settings format).
        if any(family in excluded for excluded in self.exclude_families):
            return
        if bpy.data.is_dirty:
            raise RuntimeError("Workfile is not saved.")

View file

@ -0,0 +1,17 @@
import bpy
import pyblish.api
class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin):
    """Validate that there is a camera set as active for rendering."""

    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    families = ["render"]
    label = "Validate Render Camera Is Set"
    optional = False

    def process(self, instance):
        # Rendering requires the scene to have an active camera assigned.
        active_camera = bpy.context.scene.camera
        if not active_camera:
            raise RuntimeError("No camera is active for rendering.")

View file

@ -2571,7 +2571,7 @@ def bake_to_world_space(nodes,
new_name = "{0}_baked".format(short_name)
new_node = cmds.duplicate(node,
name=new_name,
renameChildren=True)[0]
renameChildren=True)[0] # noqa
# Connect all attributes on the node except for transform
# attributes

View file

@ -0,0 +1,32 @@
from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.lib import BoolDef
class CreateMatchmove(plugin.MayaCreator):
    """Instance for more complex setup of cameras.

    Might contain multiple cameras, geometries etc.

    It is expected to be extracted into .abc or .ma
    """

    identifier = "io.openpype.creators.maya.matchmove"
    label = "Matchmove"
    family = "matchmove"
    icon = "video-camera"

    def get_instance_attr_defs(self):
        # Shared animation attributes plus the world-space bake toggle.
        attr_defs = lib.collect_animation_defs()
        bake_def = BoolDef(
            "bakeToWorldSpace",
            label="Bake Cameras to World-Space",
            tooltip="Bake Cameras to World-Space",
            default=True,
        )
        attr_defs.append(bake_def)
        return attr_defs

View file

@ -1,12 +1,6 @@
from maya import cmds, mel
from openpype.client import (
get_asset_by_id,
get_subset_by_id,
get_version_by_id,
)
from openpype.pipeline import (
get_current_project_name,
load,
get_representation_path,
)
@ -18,7 +12,7 @@ class AudioLoader(load.LoaderPlugin):
"""Specific loader of audio."""
families = ["audio"]
label = "Import audio"
label = "Load audio"
representations = ["wav"]
icon = "volume-up"
color = "orange"
@ -27,10 +21,10 @@ class AudioLoader(load.LoaderPlugin):
start_frame = cmds.playbackOptions(query=True, min=True)
sound_node = cmds.sound(
file=context["representation"]["data"]["path"], offset=start_frame
file=self.filepath_from_context(context), offset=start_frame
)
cmds.timeControl(
mel.eval("$tmpVar=$gPlayBackSlider"),
mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
edit=True,
sound=sound_node,
displaySound=True
@ -59,32 +53,50 @@ class AudioLoader(load.LoaderPlugin):
assert audio_nodes is not None, "Audio node not found."
audio_node = audio_nodes[0]
current_sound = cmds.timeControl(
mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
query=True,
sound=True
)
activate_sound = current_sound == audio_node
path = get_representation_path(representation)
cmds.setAttr("{}.filename".format(audio_node), path, type="string")
cmds.sound(
audio_node,
edit=True,
file=path
)
# The source start + end does not automatically update itself to the
# length of thew new audio file, even though maya does do that when
# creating a new audio node. So to update we compute it manually.
# This would however override any source start and source end a user
# might have done on the original audio node after load.
audio_frame_count = cmds.getAttr("{}.frameCount".format(audio_node))
audio_sample_rate = cmds.getAttr("{}.sampleRate".format(audio_node))
duration_in_seconds = audio_frame_count / audio_sample_rate
fps = mel.eval('currentTimeUnitToFPS()') # workfile FPS
source_start = 0
source_end = (duration_in_seconds * fps)
cmds.setAttr("{}.sourceStart".format(audio_node), source_start)
cmds.setAttr("{}.sourceEnd".format(audio_node), source_end)
if activate_sound:
# maya by default deactivates it from timeline on file change
cmds.timeControl(
mel.eval("$gPlayBackSlider=$gPlayBackSlider"),
edit=True,
sound=audio_node,
displaySound=True
)
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
# Set frame range.
project_name = get_current_project_name()
version = get_version_by_id(
project_name, representation["parent"], fields=["parent"]
)
subset = get_subset_by_id(
project_name, version["parent"], fields=["parent"]
)
asset = get_asset_by_id(
project_name, subset["parent"], fields=["parent"]
)
source_start = 1 - asset["data"]["frameStart"]
source_end = asset["data"]["frameEnd"]
cmds.setAttr("{}.sourceStart".format(audio_node), source_start)
cmds.setAttr("{}.sourceEnd".format(audio_node), source_end)
def switch(self, container, representation):
self.update(container, representation)

View file

@ -101,7 +101,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"camerarig",
"staticMesh",
"skeletalMesh",
"mvLook"]
"mvLook",
"matchmove"]
representations = ["ma", "abc", "fbx", "mb"]

View file

@ -6,17 +6,21 @@ from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
class ExtractCameraAlembic(publish.Extractor):
class ExtractCameraAlembic(publish.Extractor,
publish.OptionalPyblishPluginMixin):
"""Extract a Camera as Alembic.
The cameras gets baked to world space by default. Only when the instance's
The camera gets baked to world space by default. Only when the instance's
`bakeToWorldSpace` is set to False it will include its full hierarchy.
'camera' family expects only single camera, if multiple cameras are needed,
'matchmove' is better choice.
"""
label = "Camera (Alembic)"
label = "Extract Camera (Alembic)"
hosts = ["maya"]
families = ["camera"]
families = ["camera", "matchmove"]
bake_attributes = []
def process(self, instance):
@ -35,10 +39,11 @@ class ExtractCameraAlembic(publish.Extractor):
# validate required settings
assert isinstance(step, float), "Step must be a float value"
camera = cameras[0]
# Define extract output file path
dir_path = self.staging_dir(instance)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
filename = "{0}.abc".format(instance.name)
path = os.path.join(dir_path, filename)
@ -64,9 +69,10 @@ class ExtractCameraAlembic(publish.Extractor):
# if baked, drop the camera hierarchy to maintain
# clean output and backwards compatibility
camera_root = cmds.listRelatives(
camera, parent=True, fullPath=True)[0]
job_str += ' -root {0}'.format(camera_root)
camera_roots = cmds.listRelatives(
cameras, parent=True, fullPath=True)
for camera_root in camera_roots:
job_str += ' -root {0}'.format(camera_root)
for member in members:
descendants = cmds.listRelatives(member,

View file

@ -2,11 +2,15 @@
"""Extract camera as Maya Scene."""
import os
import itertools
import contextlib
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
from openpype.lib import (
BoolDef
)
def massage_ma_file(path):
@ -78,7 +82,8 @@ def unlock(plug):
cmds.disconnectAttr(source, destination)
class ExtractCameraMayaScene(publish.Extractor):
class ExtractCameraMayaScene(publish.Extractor,
publish.OptionalPyblishPluginMixin):
"""Extract a Camera as Maya Scene.
This will create a duplicate of the camera that will be baked *with*
@ -88,17 +93,22 @@ class ExtractCameraMayaScene(publish.Extractor):
The cameras gets baked to world space by default. Only when the instance's
`bakeToWorldSpace` is set to False it will include its full hierarchy.
'camera' family expects only single camera, if multiple cameras are needed,
'matchmove' is better choice.
Note:
The extracted Maya ascii file gets "massaged" removing the uuid values
so they are valid for older versions of Fusion (e.g. 6.4)
"""
label = "Camera (Maya Scene)"
label = "Extract Camera (Maya Scene)"
hosts = ["maya"]
families = ["camera"]
families = ["camera", "matchmove"]
scene_type = "ma"
keep_image_planes = True
def process(self, instance):
"""Plugin entry point."""
# get settings
@ -131,15 +141,15 @@ class ExtractCameraMayaScene(publish.Extractor):
"bake to world space is ignored...")
# get cameras
members = cmds.ls(instance.data['setMembers'], leaf=True, shapes=True,
long=True, dag=True)
cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
dag=True, type="camera")
members = set(cmds.ls(instance.data['setMembers'], leaf=True,
shapes=True, long=True, dag=True))
cameras = set(cmds.ls(members, leaf=True, shapes=True, long=True,
dag=True, type="camera"))
# validate required settings
assert isinstance(step, float), "Step must be a float value"
camera = cameras[0]
transform = cmds.listRelatives(camera, parent=True, fullPath=True)
transforms = cmds.listRelatives(list(cameras),
parent=True, fullPath=True)
# Define extract output file path
dir_path = self.staging_dir(instance)
@ -151,23 +161,21 @@ class ExtractCameraMayaScene(publish.Extractor):
with lib.evaluation("off"):
with lib.suspended_refresh():
if bake_to_worldspace:
self.log.debug(
"Performing camera bakes: {}".format(transform))
baked = lib.bake_to_world_space(
transform,
transforms,
frame_range=[start, end],
step=step
)
baked_camera_shapes = cmds.ls(baked,
type="camera",
dag=True,
shapes=True,
long=True)
baked_camera_shapes = set(cmds.ls(baked,
type="camera",
dag=True,
shapes=True,
long=True))
members = members + baked_camera_shapes
members.remove(camera)
members.update(baked_camera_shapes)
members.difference_update(cameras)
else:
baked_camera_shapes = cmds.ls(cameras,
baked_camera_shapes = cmds.ls(list(cameras),
type="camera",
dag=True,
shapes=True,
@ -186,19 +194,28 @@ class ExtractCameraMayaScene(publish.Extractor):
unlock(plug)
cmds.setAttr(plug, value)
self.log.debug("Performing extraction..")
cmds.select(cmds.ls(members, dag=True,
shapes=True, long=True), noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True,
preserveReferences=False,
constructionHistory=False,
channels=True, # allow animation
constraints=False,
shader=False,
expressions=False)
attr_values = self.get_attr_values_from_data(
instance.data)
keep_image_planes = attr_values.get("keep_image_planes")
with transfer_image_planes(sorted(cameras),
sorted(baked_camera_shapes),
keep_image_planes):
self.log.info("Performing extraction..")
cmds.select(cmds.ls(list(members), dag=True,
shapes=True, long=True),
noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True,
preserveReferences=False,
constructionHistory=False,
channels=True, # allow animation
constraints=False,
shader=False,
expressions=False)
# Delete the baked hierarchy
if bake_to_worldspace:
@ -219,3 +236,62 @@ class ExtractCameraMayaScene(publish.Extractor):
self.log.debug("Extracted instance '{0}' to: {1}".format(
instance.name, path))
    @classmethod
    def get_attribute_defs(cls):
        """Extend the inherited attributes with the image-plane toggle."""
        defs = super(ExtractCameraMayaScene, cls).get_attribute_defs()
        defs.extend([
            BoolDef("keep_image_planes",
                    label="Keep Image Planes",
                    tooltip="Preserving connected image planes on camera",
                    # default comes from the plugin class/settings value
                    default=cls.keep_image_planes),
        ])

        return defs
@contextlib.contextmanager
def transfer_image_planes(source_cameras, target_cameras,
                          keep_input_connections):
    """Reattaches image planes to baked or original cameras.

    Baked cameras are duplicates of original ones.
    This attaches it to duplicated camera properly and after
    export it reattaches it back to original to keep image plane in workfile.

    Args:
        source_cameras (list): Original camera shapes.
        target_cameras (list): Baked duplicates, zipped pairwise with the
            sources (may be the same cameras when nothing was baked).
        keep_input_connections (bool): When True, move planes to the
            target camera for the export; when False, detach them.
    """
    originals = {}
    try:
        for source_camera, target_camera in zip(source_cameras,
                                                target_cameras):
            image_planes = cmds.listConnections(source_camera,
                                                type="imagePlane") or []

            # Split of the parent path they are attached - we want
            # the image plane node name.
            # TODO: Does this still mean the image plane name is unique?
            image_planes = [x.split("->", 1)[1] for x in image_planes]

            if not image_planes:
                continue

            originals[source_camera] = []
            for image_plane in image_planes:
                if keep_input_connections:
                    # Same camera means the plane is already where it
                    # needs to be; nothing to move or restore.
                    if source_camera == target_camera:
                        continue
                    _attach_image_plane(target_camera, image_plane)
                else:  # explicitly detaching image planes
                    cmds.imagePlane(image_plane, edit=True, detach=True)
                originals[source_camera].append(image_plane)
        yield
    finally:
        # Always restore planes to their original cameras, even when the
        # export inside the context raised.
        for camera, image_planes in originals.items():
            for image_plane in image_planes:
                _attach_image_plane(camera, image_plane)
def _attach_image_plane(camera, image_plane):
    """Attach *image_plane* to *camera*, detaching it from its current one."""
    cmds.imagePlane(image_plane, edit=True, detach=True)
    cmds.imagePlane(image_plane, edit=True, camera=camera)

View file

@ -3423,3 +3423,55 @@ def create_viewer_profile_string(viewer, display=None, path_like=False):
if path_like:
return "{}/{}".format(display, viewer)
return "{} ({})".format(viewer, display)
def get_head_filename_without_hashes(original_path, name):
    """Function to get the renamed head filename without frame hashes

    To avoid the system being confused on finding the filename with
    frame hashes if the head of the filename has the hashed symbol

    Examples:
        >>> get_head_filename_without_hashes("render.####.exr", "baking")
        'render.baking.####.exr'
        >>> get_head_filename_without_hashes("render.%04d.exr", "tag")
        'render.tag.%04d.exr'
        >>> get_head_filename_without_hashes("exr.####.exr", "foo")
        'exr.foo.####.exr'

    Args:
        original_path (str): the filename with frame hashes
        name (str): the name of the tags

    Returns:
        str: the renamed filename with the tag
    """
    filename = os.path.basename(original_path)

    def insert_name(matchobj):
        # Prefix the matched frame token ("####" or "%04d") with the tag,
        # keeping the token itself intact.
        return "{}.{}".format(name, matchobj.group(0))

    return re.sub(r"(%\d*d)|#+", insert_name, filename)
def get_filenames_without_hash(filename, frame_start, frame_end):
    """Get filenames without frame hash
    i.e. "renderCompositingMain.baking.0001.exr"

    Args:
        filename (str): filename with frame hash
        frame_start (str): start of the frame
        frame_end (str): end of the frame

    Returns:
        list: filename per frame of the sequence. Empty when *filename*
            contains no ``#`` placeholder.
    """
    if "#" not in filename:
        # No frame placeholder to expand; keep the original behavior of
        # returning an empty list in that case.
        return []

    # use regex to convert #### to {:0>4} - done once, outside the loop,
    # instead of recompiling the template for every frame.
    def replace(match):
        return "{{:0>{}}}".format(len(match.group()))

    filename_template = re.sub("#+", replace, filename)

    return [
        filename_template.format(frame)
        for frame in range(int(frame_start), int(frame_end) + 1)
    ]

View file

@ -21,6 +21,9 @@ from openpype.pipeline import (
CreatedInstance,
get_current_task_name
)
from openpype.lib.transcoding import (
VIDEO_EXTENSIONS
)
from .lib import (
INSTANCE_DATA_KNOB,
Knobby,
@ -35,7 +38,9 @@ from .lib import (
get_node_data,
get_view_process_node,
get_viewer_config_from_string,
deprecated
deprecated,
get_head_filename_without_hashes,
get_filenames_without_hash
)
from .pipeline import (
list_instances,
@ -634,6 +639,10 @@ class ExporterReview(object):
"frameStart": self.first_frame,
"frameEnd": self.last_frame,
})
if ".{}".format(self.ext) not in VIDEO_EXTENSIONS:
filenames = get_filenames_without_hash(
self.file, self.first_frame, self.last_frame)
repre["files"] = filenames
if self.multiple_presets:
repre["outputName"] = self.name
@ -808,6 +817,18 @@ class ExporterReviewMov(ExporterReview):
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
if ".{}".format(self.ext) not in VIDEO_EXTENSIONS:
# filename would be with frame hashes if
# the file extension is not in video format
filename = get_head_filename_without_hashes(
self.path_in, self.name)
self.file = filename
# make sure the filename are in
# correct image output format
if ".{}".format(self.ext) not in self.file:
filename_no_ext, _ = os.path.splitext(filename)
self.file = "{}.{}".format(filename_no_ext, self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
@ -933,7 +954,6 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(str(self.path))
write_node["file_type"].setValue(str(self.ext))
# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO shouldn't this come from settings on outputs?
try:

View file

@ -8,15 +8,16 @@ from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import maintained_selection
class ExtractReviewDataMov(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts
class ExtractReviewIntermediates(publish.Extractor):
"""Extracting intermediate videos or sequences with
thumbnail for transcoding.
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data Mov"
label = "Extract Review Intermediates"
families = ["review"]
hosts = ["nuke"]
@ -25,6 +26,22 @@ class ExtractReviewDataMov(publish.Extractor):
viewer_lut_raw = None
outputs = {}
@classmethod
def apply_settings(cls, project_settings):
"""Apply the settings from the deprecated
ExtractReviewDataMov plugin for backwards compatibility
"""
nuke_publish = project_settings["nuke"]["publish"]
deprecated_setting = nuke_publish["ExtractReviewDataMov"]
current_setting = nuke_publish["ExtractReviewIntermediates"]
if deprecated_setting["enabled"]:
# Use deprecated settings if they are still enabled
cls.viewer_lut_raw = deprecated_setting["viewer_lut_raw"]
cls.outputs = deprecated_setting["outputs"]
elif current_setting["enabled"]:
cls.viewer_lut_raw = current_setting["viewer_lut_raw"]
cls.outputs = current_setting["outputs"]
def process(self, instance):
families = set(instance.data["families"])

View file

@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
"""Submitting render job to Deadline."""
import os
import getpass
import attr
from datetime import datetime
import bpy
from openpype.lib import is_running_from_build
from openpype.pipeline import legacy_io
from openpype.pipeline.farm.tools import iter_expected_files
from openpype.tests.lib import is_in_tests
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class BlenderPluginInfo():
SceneFile = attr.ib(default=None) # Input
Version = attr.ib(default=None) # Mandatory for Deadline
SaveFile = attr.ib(default=True)
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
label = "Submit Render to Deadline"
hosts = ["blender"]
families = ["render.farm"]
use_published = True
priority = 50
chunk_size = 1
jobInfo = {}
pluginInfo = {}
group = None
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="Blender")
job_info.update(self.jobInfo)
instance = self._instance
context = instance.context
# Always use the original work file name for the Job name even when
# rendering is done from the published Work File. The original work
# file name is clearer because it can also have subversion strings,
# etc. which are stripped for the published file.
src_filepath = context.data["currentFile"]
src_filename = os.path.basename(src_filepath)
if is_in_tests():
src_filename += datetime.now().strftime("%d%m%Y%H%M%S")
job_info.Name = f"{src_filename} - {instance.name}"
job_info.BatchName = src_filename
instance.data.get("blenderRenderPlugin", "Blender")
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
# Deadline requires integers in frame range
frames = "{start}-{end}x{step}".format(
start=int(instance.data["frameStartHandle"]),
end=int(instance.data["frameEndHandle"]),
step=int(instance.data["byFrameStep"]),
)
job_info.Frames = frames
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
if self.group != "none" and self.group:
job_info.Group = self.group
attr_values = self.get_attr_values_from_data(instance.data)
render_globals = instance.data.setdefault("renderGlobals", {})
machine_list = attr_values.get("machineList", "")
if machine_list:
if attr_values.get("whitelist", True):
machine_list_key = "Whitelist"
else:
machine_list_key = "Blacklist"
render_globals[machine_list_key] = machine_list
job_info.Priority = attr_values.get("priority")
job_info.ChunkSize = attr_values.get("chunkSize")
# Add options from RenderGlobals
render_globals = instance.data.get("renderGlobals", {})
job_info.update(render_globals)
keys = [
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"OPENPYPE_SG_USER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV"
"IS_TEST"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
for key in keys:
value = environment.get(key)
if not value:
continue
job_info.EnvironmentKeyValue[key] = value
# to recognize job from PYPE for turning Event On/Off
job_info.add_render_job_env_var()
job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1"
# Adding file dependencies.
if self.asset_dependencies:
dependencies = instance.context.data["fileDependencies"]
for dependency in dependencies:
job_info.AssetDependency += dependency
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
for filepath in iter_expected_files(exp):
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
return job_info
def get_plugin_info(self):
plugin_info = BlenderPluginInfo(
SceneFile=self.scene_path,
Version=bpy.app.version_string,
SaveFile=True,
)
plugin_payload = attr.asdict(plugin_info)
# Patching with pluginInfo from settings
for key, value in self.pluginInfo.items():
plugin_payload[key] = value
return plugin_payload
def process_submission(self):
instance = self._instance
expected_files = instance.data["expectedFiles"]
if not expected_files:
raise RuntimeError("No Render Elements found!")
first_file = next(iter_expected_files(expected_files))
output_dir = os.path.dirname(first_file)
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
payload = self.assemble_payload()
return self.submit(payload)
def from_published_scene(self):
"""
This is needed to set the correct path for the json metadata. Because
the rendering path is set in the blend file during the collection,
and the path is adjusted to use the published scene, this ensures that
the metadata and the rendered files are in the same location.
"""
return super().from_published_scene(False)

View file

@ -96,7 +96,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
targets = ["local"]
hosts = ["fusion", "max", "maya", "nuke", "houdini",
"celaction", "aftereffects", "harmony"]
"celaction", "aftereffects", "harmony", "blender"]
families = ["render.farm", "render.frames_farm",
"prerender.farm", "prerender.frames_farm",
@ -107,6 +107,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"redshift_rop"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"blender": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"],

View file

@ -748,7 +748,19 @@ def _convert_nuke_project_settings(ayon_settings, output):
)
new_review_data_outputs = {}
for item in ayon_publish["ExtractReviewDataMov"]["outputs"]:
outputs_settings = None
# Check deprecated ExtractReviewDataMov
# settings for backwards compatibility
deprecrated_review_settings = ayon_publish["ExtractReviewDataMov"]
current_review_settings = (
ayon_publish["ExtractReviewIntermediates"]
)
if deprecrated_review_settings["enabled"]:
outputs_settings = deprecrated_review_settings["outputs"]
elif current_review_settings["enabled"]:
outputs_settings = current_review_settings["outputs"]
for item in outputs_settings:
item_filter = item["filter"]
if "product_names" in item_filter:
item_filter["subsets"] = item_filter.pop("product_names")
@ -767,7 +779,11 @@ def _convert_nuke_project_settings(ayon_settings, output):
name = item.pop("name")
new_review_data_outputs[name] = item
ayon_publish["ExtractReviewDataMov"]["outputs"] = new_review_data_outputs
if deprecrated_review_settings["enabled"]:
deprecrated_review_settings["outputs"] = new_review_data_outputs
elif current_review_settings["enabled"]:
current_review_settings["outputs"] = new_review_data_outputs
collect_instance_data = ayon_publish["CollectInstanceData"]
if "sync_workfile_version_on_product_types" in collect_instance_data:

View file

@ -17,6 +17,14 @@
"rules": {}
}
},
"RenderSettings": {
"default_render_image_folder": "renders/blender",
"aov_separator": "underscore",
"image_format": "exr",
"multilayer_exr": true,
"aov_list": [],
"custom_passes": []
},
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
@ -27,6 +35,22 @@
"optional": true,
"active": true
},
"ValidateFileSaved": {
"enabled": true,
"optional": false,
"active": true,
"exclude_families": []
},
"ValidateRenderCameraIsSet": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateDeadlinePublish": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateMeshHasUvs": {
"enabled": true,
"optional": true,

View file

@ -99,6 +99,15 @@
"deadline_chunk_size": 10,
"deadline_job_delay": "00:00:00:00"
},
"BlenderSubmitDeadline": {
"enabled": true,
"optional": false,
"active": true,
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedJobOnFarm": {
"enabled": true,
"deadline_department": "",
@ -112,6 +121,9 @@
"maya": [
".*([Bb]eauty).*"
],
"blender": [
".*([Bb]eauty).*"
],
"aftereffects": [
".*"
],

View file

@ -1338,6 +1338,12 @@
"active": true,
"bake_attributes": []
},
"ExtractCameraMayaScene": {
"enabled": true,
"optional": true,
"active": true,
"keep_image_planes": false
},
"ExtractGLB": {
"enabled": true,
"active": true,

View file

@ -501,6 +501,60 @@
}
}
},
"ExtractReviewIntermediates": {
"enabled": true,
"viewer_lut_raw": false,
"outputs": {
"baking": {
"filter": {
"task_types": [],
"families": [],
"subsets": []
},
"read_raw": false,
"viewer_process_override": "",
"bake_viewer_process": true,
"bake_viewer_input_process": true,
"reformat_nodes_config": {
"enabled": false,
"reposition_nodes": [
{
"node_class": "Reformat",
"knobs": [
{
"type": "text",
"name": "type",
"value": "to format"
},
{
"type": "text",
"name": "format",
"value": "HD_1080"
},
{
"type": "text",
"name": "filter",
"value": "Lanczos6"
},
{
"type": "bool",
"name": "black_outside",
"value": true
},
{
"type": "bool",
"name": "pbb",
"value": false
}
]
}
]
},
"extension": "mov",
"add_custom_tags": []
}
}
},
"ExtractSlateFrame": {
"viewer_lut_raw": false,
"key_value_mapping": {

View file

@ -54,6 +54,110 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "RenderSettings",
"label": "Render Settings",
"children": [
{
"type": "text",
"key": "default_render_image_folder",
"label": "Default render image folder"
},
{
"key": "aov_separator",
"label": "AOV Separator Character",
"type": "enum",
"multiselection": false,
"defaults": "underscore",
"enum_items": [
{"dash": "- (dash)"},
{"underscore": "_ (underscore)"},
{"dot": ". (dot)"}
]
},
{
"key": "image_format",
"label": "Output Image Format",
"type": "enum",
"multiselection": false,
"defaults": "exr",
"enum_items": [
{"exr": "OpenEXR"},
{"bmp": "BMP"},
{"rgb": "Iris"},
{"png": "PNG"},
{"jpg": "JPEG"},
{"jp2": "JPEG 2000"},
{"tga": "Targa"},
{"tif": "TIFF"}
]
},
{
"key": "multilayer_exr",
"type": "boolean",
"label": "Multilayer (EXR)"
},
{
"type": "label",
"label": "Note: Multilayer EXR is only used when output format type set to EXR."
},
{
"key": "aov_list",
"label": "AOVs to create",
"type": "enum",
"multiselection": true,
"defaults": "empty",
"enum_items": [
{"empty": "< empty >"},
{"combined": "Combined"},
{"z": "Z"},
{"mist": "Mist"},
{"normal": "Normal"},
{"diffuse_light": "Diffuse Light"},
{"diffuse_color": "Diffuse Color"},
{"specular_light": "Specular Light"},
{"specular_color": "Specular Color"},
{"volume_light": "Volume Light"},
{"emission": "Emission"},
{"environment": "Environment"},
{"shadow": "Shadow"},
{"ao": "Ambient Occlusion"},
{"denoising": "Denoising"},
{"volume_direct": "Direct Volumetric Scattering"},
{"volume_indirect": "Indirect Volumetric Scattering"}
]
},
{
"type": "label",
"label": "Add custom AOVs. They are added to the view layer and in the Compositing Nodetree,\nbut they need to be added manually to the Shader Nodetree."
},
{
"type": "dict-modifiable",
"store_as_list": true,
"key": "custom_passes",
"label": "Custom Passes",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"key": "type",
"label": "Type",
"type": "enum",
"multiselection": false,
"default": "COLOR",
"enum_items": [
{"COLOR": "Color"},
{"VALUE": "Value"}
]
}
]
}
}
]
},
{
"type": "schema_template",
"name": "template_workfile_options",

View file

@ -531,6 +531,50 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "BlenderSubmitDeadline",
"label": "Blender Submit to Deadline",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "boolean",
"key": "use_published",
"label": "Use Published scene"
},
{
"type": "number",
"key": "priority",
"label": "Priority"
},
{
"type": "number",
"key": "chunk_size",
"label": "Frame per Task"
},
{
"type": "text",
"key": "group",
"label": "Group Name"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -18,6 +18,39 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ValidateFileSaved",
"label": "Validate File Saved",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "splitter"
},
{
"key": "exclude_families",
"label": "Exclude Families",
"type": "list",
"object_type": "text"
}
]
},
{
"type": "collapsible-wrap",
"label": "Model",
@ -46,6 +79,66 @@
}
]
},
{
"type": "collapsible-wrap",
"label": "Render",
"children": [
{
"type": "schema_template",
"name": "template_publish_plugin",
"template_data": [
{
"type": "dict",
"collapsible": true,
"key": "ValidateRenderCameraIsSet",
"label": "Validate Render Camera Is Set",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ValidateDeadlinePublish",
"label": "Validate Render Output for Deadline",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
}
]
}
]
}
]
},
{
"type": "splitter"
},

View file

@ -978,6 +978,35 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ExtractCameraMayaScene",
"label": "Extract camera to Maya scene",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "boolean",
"key": "keep_image_planes",
"label": "Export Image planes"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -371,6 +371,151 @@
]
},
{
"type": "label",
"label": "^ Settings and for <span style=\"color:#FF0000\";><b>ExtractReviewDataMov</b></span> is deprecated and will be soon removed. <br> Please use <b>ExtractReviewIntermediates</b> instead."
},
{
"type": "dict",
"collapsible": true,
"checkbox_key": "enabled",
"key": "ExtractReviewIntermediates",
"label": "ExtractReviewIntermediates",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "viewer_lut_raw",
"label": "Viewer LUT raw"
},
{
"key": "outputs",
"label": "Output Definitions",
"type": "dict-modifiable",
"highlight_content": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "dict",
"collapsible": false,
"key": "filter",
"label": "Filtering",
"children": [
{
"key": "task_types",
"label": "Task types",
"type": "task-types-enum"
},
{
"key": "families",
"label": "Families",
"type": "list",
"object_type": "text"
},
{
"key": "subsets",
"label": "Subsets",
"type": "list",
"object_type": "text"
}
]
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "read_raw",
"label": "Read colorspace RAW",
"default": false
},
{
"type": "text",
"key": "viewer_process_override",
"label": "Viewer Process colorspace profile override"
},
{
"type": "boolean",
"key": "bake_viewer_process",
"label": "Bake Viewer Process"
},
{
"type": "boolean",
"key": "bake_viewer_input_process",
"label": "Bake Viewer Input Process (LUTs)"
},
{
"type": "separator"
},
{
"key": "reformat_nodes_config",
"type": "dict",
"label": "Reformat Nodes",
"collapsible": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Reposition knobs supported only.<br/>You can add multiple reformat nodes <br/>and set their knobs. Order of reformat <br/>nodes is important. First reformat node <br/>will be applied first and last reformat <br/>node will be applied last."
},
{
"key": "reposition_nodes",
"type": "list",
"label": "Reposition nodes",
"object_type": {
"type": "dict",
"children": [
{
"key": "node_class",
"label": "Node class",
"type": "text"
},
{
"type": "schema_template",
"name": "template_nuke_knob_inputs",
"template_data": [
{
"label": "Node knobs",
"key": "knobs"
}
]
}
]
}
}
]
},
{
"type": "separator"
},
{
"type": "text",
"key": "extension",
"label": "Write node file type"
},
{
"key": "add_custom_tags",
"label": "Add custom tags",
"type": "list",
"object_type": "text"
}
]
}
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -914,10 +914,12 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
# Controller actions
@abstractmethod
def open_workfile(self, filepath):
"""Open a workfile.
def open_workfile(self, folder_id, task_id, filepath):
"""Open a workfile for context.
Args:
folder_id (str): Folder id.
task_id (str): Task id.
filepath (str): Workfile path.
"""

View file

@ -452,12 +452,12 @@ class BaseWorkfileController(
self._emit_event("controller.refresh.finished")
# Controller actions
def open_workfile(self, filepath):
def open_workfile(self, folder_id, task_id, filepath):
self._emit_event("open_workfile.started")
failed = False
try:
self._host_open_workfile(filepath)
self._open_workfile(folder_id, task_id, filepath)
except Exception:
failed = True
@ -575,6 +575,53 @@ class BaseWorkfileController(
self._expected_selection.get_expected_selection_data(),
)
def _get_event_context_data(
self, project_name, folder_id, task_id, folder=None, task=None
):
if folder is None:
folder = self.get_folder_entity(folder_id)
if task is None:
task = self.get_task_entity(task_id)
# NOTE keys should be OpenPype compatible
return {
"project_name": project_name,
"folder_id": folder_id,
"asset_id": folder_id,
"asset_name": folder["name"],
"task_id": task_id,
"task_name": task["name"],
"host_name": self.get_host_name(),
}
def _open_workfile(self, folder_id, task_id, filepath):
project_name = self.get_current_project_name()
event_data = self._get_event_context_data(
project_name, folder_id, task_id
)
event_data["filepath"] = filepath
emit_event("workfile.open.before", event_data, source="workfiles.tool")
# Change context
task_name = event_data["task_name"]
if (
folder_id != self.get_current_folder_id()
or task_name != self.get_current_task_name()
):
# Use OpenPype asset-like object
asset_doc = get_asset_by_id(
event_data["project_name"],
event_data["folder_id"],
)
change_current_context(
asset_doc,
event_data["task_name"]
)
self._host_open_workfile(filepath)
emit_event("workfile.open.after", event_data, source="workfiles.tool")
def _save_as_workfile(
self,
folder_id,
@ -591,18 +638,14 @@ class BaseWorkfileController(
task_name = task["name"]
# QUESTION should the data be different for 'before' and 'after'?
# NOTE keys should be OpenPype compatible
event_data = {
"project_name": project_name,
"folder_id": folder_id,
"asset_id": folder_id,
"asset_name": folder["name"],
"task_id": task_id,
"task_name": task_name,
"host_name": self.get_host_name(),
event_data = self._get_event_context_data(
project_name, folder_id, task_id, folder, task
)
event_data.update({
"filename": filename,
"workdir_path": workdir,
}
})
emit_event("workfile.save.before", event_data, source="workfiles.tool")
# Create workfiles root folder

View file

@ -106,7 +106,8 @@ class FilesWidget(QtWidgets.QWidget):
self._on_published_cancel_clicked)
self._selected_folder_id = None
self._selected_tak_name = None
self._selected_task_id = None
self._selected_task_name = None
self._pre_select_folder_id = None
self._pre_select_task_name = None
@ -178,7 +179,7 @@ class FilesWidget(QtWidgets.QWidget):
# -------------------------------------------------------------
# Workarea workfiles
# -------------------------------------------------------------
def _open_workfile(self, filepath):
def _open_workfile(self, folder_id, task_name, filepath):
if self._controller.has_unsaved_changes():
result = self._save_changes_prompt()
if result is None:
@ -186,12 +187,15 @@ class FilesWidget(QtWidgets.QWidget):
if result:
self._controller.save_current_workfile()
self._controller.open_workfile(filepath)
self._controller.open_workfile(folder_id, task_name, filepath)
def _on_workarea_open_clicked(self):
path = self._workarea_widget.get_selected_path()
if path:
self._open_workfile(path)
if not path:
return
folder_id = self._selected_folder_id
task_id = self._selected_task_id
self._open_workfile(folder_id, task_id, path)
def _on_current_open_requests(self):
self._on_workarea_open_clicked()
@ -238,8 +242,12 @@ class FilesWidget(QtWidgets.QWidget):
}
filepath = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0]
if filepath:
self._open_workfile(filepath)
if not filepath:
return
folder_id = self._selected_folder_id
task_id = self._selected_task_id
self._open_workfile(folder_id, task_id, filepath)
def _on_workarea_save_clicked(self):
result = self._exec_save_as_dialog()
@ -279,10 +287,11 @@ class FilesWidget(QtWidgets.QWidget):
def _on_task_changed(self, event):
self._selected_folder_id = event["folder_id"]
self._selected_tak_name = event["task_name"]
self._selected_task_id = event["task_id"]
self._selected_task_name = event["task_name"]
self._valid_selected_context = (
self._selected_folder_id is not None
and self._selected_tak_name is not None
and self._selected_task_id is not None
)
self._update_published_btns_state()
@ -311,7 +320,7 @@ class FilesWidget(QtWidgets.QWidget):
if enabled:
self._pre_select_folder_id = self._selected_folder_id
self._pre_select_task_name = self._selected_tak_name
self._pre_select_task_name = self._selected_task_name
else:
self._pre_select_folder_id = None
self._pre_select_task_name = None
@ -334,7 +343,7 @@ class FilesWidget(QtWidgets.QWidget):
return True
if self._pre_select_task_name is None:
return False
return self._pre_select_task_name != self._selected_tak_name
return self._pre_select_task_name != self._selected_task_name
def _on_published_cancel_clicked(self):
folder_id = self._pre_select_folder_id

View file

@ -176,11 +176,10 @@ class PublishReportMaker:
self._create_discover_result = None
self._convert_discover_result = None
self._publish_discover_result = None
self._plugin_data = []
self._plugin_data_with_plugin = []
self._stored_plugins = []
self._current_plugin_data = []
self._plugin_data_by_id = {}
self._current_plugin = None
self._current_plugin_data = {}
self._all_instances_by_id = {}
self._current_context = None
@ -192,8 +191,9 @@ class PublishReportMaker:
create_context.convertor_discover_result
)
self._publish_discover_result = create_context.publish_discover_result
self._plugin_data = []
self._plugin_data_with_plugin = []
self._plugin_data_by_id = {}
self._current_plugin = None
self._current_plugin_data = {}
self._all_instances_by_id = {}
self._current_context = context
@ -210,18 +210,11 @@ class PublishReportMaker:
if self._current_plugin_data:
self._current_plugin_data["passed"] = True
self._current_plugin = plugin
self._current_plugin_data = self._add_plugin_data_item(plugin)
def _get_plugin_data_item(self, plugin):
store_item = None
for item in self._plugin_data_with_plugin:
if item["plugin"] is plugin:
store_item = item["data"]
break
return store_item
def _add_plugin_data_item(self, plugin):
if plugin in self._stored_plugins:
if plugin.id in self._plugin_data_by_id:
# A plugin would be processed more than once. What can cause it:
# - there is a bug in controller
# - plugin class is imported into multiple files
@ -229,15 +222,9 @@ class PublishReportMaker:
raise ValueError(
"Plugin '{}' is already stored".format(str(plugin)))
self._stored_plugins.append(plugin)
plugin_data_item = self._create_plugin_data_item(plugin)
self._plugin_data_by_id[plugin.id] = plugin_data_item
self._plugin_data_with_plugin.append({
"plugin": plugin,
"data": plugin_data_item
})
self._plugin_data.append(plugin_data_item)
return plugin_data_item
def _create_plugin_data_item(self, plugin):
@ -278,7 +265,7 @@ class PublishReportMaker:
"""Add result of single action."""
plugin = result["plugin"]
store_item = self._get_plugin_data_item(plugin)
store_item = self._plugin_data_by_id.get(plugin.id)
if store_item is None:
store_item = self._add_plugin_data_item(plugin)
@ -300,14 +287,24 @@ class PublishReportMaker:
instance, instance in self._current_context
)
plugins_data = copy.deepcopy(self._plugin_data)
if plugins_data and not plugins_data[-1]["passed"]:
plugins_data[-1]["passed"] = True
plugins_data_by_id = copy.deepcopy(
self._plugin_data_by_id
)
# Ensure the current plug-in is marked as `passed` in the result
# so that it shows on reports for paused publishes
if self._current_plugin is not None:
current_plugin_data = plugins_data_by_id.get(
self._current_plugin.id
)
if current_plugin_data and not current_plugin_data["passed"]:
current_plugin_data["passed"] = True
if publish_plugins:
for plugin in publish_plugins:
if plugin not in self._stored_plugins:
plugins_data.append(self._create_plugin_data_item(plugin))
if plugin.id not in plugins_data_by_id:
plugins_data_by_id[plugin.id] = \
self._create_plugin_data_item(plugin)
reports = []
if self._create_discover_result is not None:
@ -328,7 +325,7 @@ class PublishReportMaker:
)
return {
"plugins_data": plugins_data,
"plugins_data": list(plugins_data_by_id.values()),
"instances": instances_details,
"context": self._extract_context_data(self._current_context),
"crashed_file_paths": crashed_file_paths,

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.1"
__version__ = "3.17.2-nightly.1"

View file

@ -9,6 +9,10 @@ from .publish_plugins import (
PublishPuginsModel,
DEFAULT_BLENDER_PUBLISH_SETTINGS
)
from .render_settings import (
RenderSettingsModel,
DEFAULT_RENDER_SETTINGS
)
class UnitScaleSettingsModel(BaseSettingsModel):
@ -37,6 +41,8 @@ class BlenderSettings(BaseSettingsModel):
default_factory=BlenderImageIOModel,
title="Color Management (ImageIO)"
)
render_settings: RenderSettingsModel = Field(
default_factory=RenderSettingsModel, title="Render Settings")
workfile_builder: TemplateWorkfileBaseOptions = Field(
default_factory=TemplateWorkfileBaseOptions,
title="Workfile Builder"
@ -55,6 +61,7 @@ DEFAULT_VALUES = {
},
"set_frames_startup": True,
"set_resolution_startup": True,
"render_settings": DEFAULT_RENDER_SETTINGS,
"publish": DEFAULT_BLENDER_PUBLISH_SETTINGS,
"workfile_builder": {
"create_first_version": False,

View file

@ -26,6 +26,16 @@ class ValidatePluginModel(BaseSettingsModel):
active: bool = Field(title="Active")
class ValidateFileSavedModel(BaseSettingsModel):
enabled: bool = Field(title="ValidateFileSaved")
optional: bool = Field(title="Optional")
active: bool = Field(title="Active")
exclude_families: list[str] = Field(
default_factory=list,
title="Exclude product types"
)
class ExtractBlendModel(BaseSettingsModel):
enabled: bool = Field(True)
optional: bool = Field(title="Optional")
@ -53,6 +63,21 @@ class PublishPuginsModel(BaseSettingsModel):
title="Validate Camera Zero Keyframe",
section="Validators"
)
ValidateFileSaved: ValidateFileSavedModel = Field(
default_factory=ValidateFileSavedModel,
title="Validate File Saved",
section="Validators"
)
ValidateRenderCameraIsSet: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Camera Is Set",
section="Validators"
)
ValidateDeadlinePublish: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Output for Deadline",
section="Validators"
)
ValidateMeshHasUvs: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Mesh Has Uvs"
@ -118,6 +143,22 @@ DEFAULT_BLENDER_PUBLISH_SETTINGS = {
"optional": True,
"active": True
},
"ValidateFileSaved": {
"enabled": True,
"optional": False,
"active": True,
"exclude_families": []
},
"ValidateRenderCameraIsSet": {
"enabled": True,
"optional": False,
"active": True
},
"ValidateDeadlinePublish": {
"enabled": True,
"optional": False,
"active": True
},
"ValidateMeshHasUvs": {
"enabled": True,
"optional": True,

View file

@ -0,0 +1,109 @@
"""Providing models and values for Blender Render Settings."""
from pydantic import Field
from ayon_server.settings import BaseSettingsModel
def aov_separators_enum():
return [
{"value": "dash", "label": "- (dash)"},
{"value": "underscore", "label": "_ (underscore)"},
{"value": "dot", "label": ". (dot)"}
]
def image_format_enum():
return [
{"value": "exr", "label": "OpenEXR"},
{"value": "bmp", "label": "BMP"},
{"value": "rgb", "label": "Iris"},
{"value": "png", "label": "PNG"},
{"value": "jpg", "label": "JPEG"},
{"value": "jp2", "label": "JPEG 2000"},
{"value": "tga", "label": "Targa"},
{"value": "tif", "label": "TIFF"},
]
def aov_list_enum():
return [
{"value": "empty", "label": "< none >"},
{"value": "combined", "label": "Combined"},
{"value": "z", "label": "Z"},
{"value": "mist", "label": "Mist"},
{"value": "normal", "label": "Normal"},
{"value": "diffuse_light", "label": "Diffuse Light"},
{"value": "diffuse_color", "label": "Diffuse Color"},
{"value": "specular_light", "label": "Specular Light"},
{"value": "specular_color", "label": "Specular Color"},
{"value": "volume_light", "label": "Volume Light"},
{"value": "emission", "label": "Emission"},
{"value": "environment", "label": "Environment"},
{"value": "shadow", "label": "Shadow"},
{"value": "ao", "label": "Ambient Occlusion"},
{"value": "denoising", "label": "Denoising"},
{"value": "volume_direct", "label": "Direct Volumetric Scattering"},
{"value": "volume_indirect", "label": "Indirect Volumetric Scattering"}
]
def custom_passes_types_enum():
return [
{"value": "COLOR", "label": "Color"},
{"value": "VALUE", "label": "Value"},
]
class CustomPassesModel(BaseSettingsModel):
"""Custom Passes"""
_layout = "compact"
attribute: str = Field("", title="Attribute name")
value: str = Field(
"COLOR",
title="Type",
enum_resolver=custom_passes_types_enum
)
class RenderSettingsModel(BaseSettingsModel):
default_render_image_folder: str = Field(
title="Default Render Image Folder"
)
aov_separator: str = Field(
"underscore",
title="AOV Separator Character",
enum_resolver=aov_separators_enum
)
image_format: str = Field(
"exr",
title="Image Format",
enum_resolver=image_format_enum
)
multilayer_exr: bool = Field(
title="Multilayer (EXR)"
)
aov_list: list[str] = Field(
default_factory=list,
enum_resolver=aov_list_enum,
title="AOVs to create"
)
custom_passes: list[CustomPassesModel] = Field(
default_factory=list,
title="Custom Passes",
description=(
"Add custom AOVs. They are added to the view layer and in the "
"Compositing Nodetree,\nbut they need to be added manually to "
"the Shader Nodetree."
)
)
DEFAULT_RENDER_SETTINGS = {
"default_render_image_folder": "renders/blender",
"aov_separator": "underscore",
"image_format": "exr",
"multilayer_exr": True,
"aov_list": [],
"custom_passes": []
}

View file

@ -1 +1 @@
__version__ = "0.1.1"
__version__ = "0.1.3"

View file

@ -208,6 +208,16 @@ class CelactionSubmitDeadlineModel(BaseSettingsModel):
)
class BlenderSubmitDeadlineModel(BaseSettingsModel):
    """Settings for the Blender render submission to Deadline."""
    # Whether the submission plugin is enabled at all.
    enabled: bool = Field(True)
    # When optional, the artist can skip the plugin per publish.
    optional: bool = Field(title="Optional")
    # Default toggle state presented to the artist.
    active: bool = Field(title="Active")
    # Submit the published scene copy instead of the live workfile.
    use_published: bool = Field(title="Use Published scene")
    # Deadline job priority.
    priority: int = Field(title="Priority")
    # Number of frames rendered per Deadline task.
    chunk_size: int = Field(title="Frame per Task")
    # Deadline group the job is submitted to ("none" = no group).
    group: str = Field("", title="Group Name")
class AOVFilterSubmodel(BaseSettingsModel):
_layout = "expanded"
name: str = Field(title="Host")
@ -276,8 +286,10 @@ class PublishPluginsModel(BaseSettingsModel):
title="After Effects to deadline")
CelactionSubmitDeadline: CelactionSubmitDeadlineModel = Field(
default_factory=CelactionSubmitDeadlineModel,
title="Celaction Submit Deadline"
)
title="Celaction Submit Deadline")
BlenderSubmitDeadline: BlenderSubmitDeadlineModel = Field(
default_factory=BlenderSubmitDeadlineModel,
title="Blender Submit Deadline")
ProcessSubmittedJobOnFarm: ProcessSubmittedJobOnFarmModel = Field(
default_factory=ProcessSubmittedJobOnFarmModel,
title="Process submitted job on farm.")
@ -384,6 +396,15 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
"deadline_chunk_size": 10,
"deadline_job_delay": "00:00:00:00"
},
"BlenderSubmitDeadline": {
"enabled": True,
"optional": False,
"active": True,
"use_published": True,
"priority": 50,
"chunk_size": 10,
"group": "none"
},
"ProcessSubmittedJobOnFarm": {
"enabled": True,
"deadline_department": "",
@ -400,6 +421,12 @@ DEFAULT_DEADLINE_PLUGINS_SETTINGS = {
".*([Bb]eauty).*"
]
},
{
"name": "blender",
"value": [
".*([Bb]eauty).*"
]
},
{
"name": "aftereffects",
"value": [

View file

@ -149,7 +149,7 @@ class ReformatNodesConfigModel(BaseSettingsModel):
)
class BakingStreamModel(BaseSettingsModel):
class IntermediateOutputModel(BaseSettingsModel):
name: str = Field(title="Output name")
filter: BakingStreamFilterModel = Field(
title="Filter", default_factory=BakingStreamFilterModel)
@ -166,9 +166,21 @@ class BakingStreamModel(BaseSettingsModel):
class ExtractReviewDataMovModel(BaseSettingsModel):
    """[deprecated] use Extract Review Intermediates instead."""
    enabled: bool = Field(title="Enabled")
    # Publish the baked output without applying the viewer LUT.
    viewer_lut_raw: bool = Field(title="Viewer lut raw")
    # Per-output baking stream definitions.
    outputs: list[IntermediateOutputModel] = Field(
        default_factory=list,
        title="Baking streams"
    )
class ExtractReviewIntermediatesModel(BaseSettingsModel):
    """Settings for baking review intermediate outputs.

    Successor of the deprecated ExtractReviewDataMov plugin.
    """
    enabled: bool = Field(title="Enabled")
    # Publish the baked output without applying the viewer LUT.
    viewer_lut_raw: bool = Field(title="Viewer lut raw")
    # Per-output baking stream definitions.
    outputs: list[IntermediateOutputModel] = Field(
        default_factory=list,
        title="Baking streams"
    )
@ -270,6 +282,10 @@ class PublishPuginsModel(BaseSettingsModel):
title="Extract Review Data Mov",
default_factory=ExtractReviewDataMovModel
)
ExtractReviewIntermediates: ExtractReviewIntermediatesModel = Field(
title="Extract Review Intermediates",
default_factory=ExtractReviewIntermediatesModel
)
ExtractSlateFrame: ExtractSlateFrameModel = Field(
title="Extract Slate Frame",
default_factory=ExtractSlateFrameModel
@ -465,6 +481,61 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
}
]
},
"ExtractReviewIntermediates": {
"enabled": True,
"viewer_lut_raw": False,
"outputs": [
{
"name": "baking",
"filter": {
"task_types": [],
"product_types": [],
"product_names": []
},
"read_raw": False,
"viewer_process_override": "",
"bake_viewer_process": True,
"bake_viewer_input_process": True,
"reformat_nodes_config": {
"enabled": False,
"reposition_nodes": [
{
"node_class": "Reformat",
"knobs": [
{
"type": "text",
"name": "type",
"text": "to format"
},
{
"type": "text",
"name": "format",
"text": "HD_1080"
},
{
"type": "text",
"name": "filter",
"text": "Lanczos6"
},
{
"type": "bool",
"name": "black_outside",
"boolean": True
},
{
"type": "bool",
"name": "pbb",
"boolean": False
}
]
}
]
},
"extension": "mov",
"add_custom_tags": []
}
]
},
"ExtractSlateFrame": {
"viewer_lut_raw": False,
"key_value_mapping": {

View file

@ -1 +1 @@
__version__ = "0.1.2"
__version__ = "0.1.3"

View file

@ -33,39 +33,41 @@ The Instances are categorized into families based on what type of data the
The following family definitions and requirements are OpenPype defaults and reflect what we consider good industry practice, but most of the requirements can be easily altered to suit the studio or project needs.
Here's a list of supported families
| Family | Comment | Example Subsets |
| ----------------------- | ------------------------------------------------ | ------------------------- |
| [Model](#model) | Cleaned geo without materials | main, proxy, broken |
| [Look](#look) | Package of shaders, assignments and textures | main, wet, dirty |
| [Rig](#rig) | Characters or props with animation controls | main, deform, sim |
| [Assembly](#assembly) | A complex model made from multiple other models. | main, deform, sim |
| [Layout](#layout) | Simple representation of the environment | main, |
| [Setdress](#setdress) | Environment containing only referenced assets | main, |
| [Camera](#camera) | May contain trackers or proxy geo | main, tracked, anim |
| [Animation](#animation) | Animation exported from a rig. | characterA, vehicleB |
| [Cache](#cache) | Arbitrary animated geometry or fx cache | rest, ROM , pose01 |
| MayaAscii | Maya publishes that don't fit other categories | |
| [Render](#render) | Rendered frames from CG or Comp | |
| RenderSetup | Scene render settings, AOVs and layers | |
| Plate | Ingested, transcode, conformed footage | raw, graded, imageplane |
| Write | Nuke write nodes for rendering | |
| Image | Any non-plate image to be used by artists | Reference, ConceptArt |
| LayeredImage | Software agnostic layered image with metadata | Reference, ConceptArt |
| Review | Reviewable video or image. | |
| Matchmove | Matchmoved camera, potentially with geometry | main |
| Workfile | Backup of the workfile with all its content | uses the task name |
| Nukenodes | Any collection of nuke nodes | maskSetup, usefulBackdrop |
| Yeticache | Cached out yeti fur setup | |
| YetiRig | Yeti groom ready to be applied to geometry cache | main, destroyed |
| VrayProxy | Vray proxy geometry for rendering | |
| VrayScene | Vray full scene export | |
| ArnodldStandin | All arnold .ass archives for rendering | main, wet, dirty |
| LUT | | |
| Nukenodes | | |
| Gizmo | | |
| Nukenodes | | |
| Harmony.template | | |
| Harmony.palette | | |
| Family | Comment | Example Subsets |
|-------------------------|-------------------------------------------------------| ------------------------- |
| [Model](#model) | Cleaned geo without materials | main, proxy, broken |
| [Look](#look) | Package of shaders, assignments and textures | main, wet, dirty |
| [Rig](#rig) | Characters or props with animation controls | main, deform, sim |
| [Assembly](#assembly) | A complex model made from multiple other models. | main, deform, sim |
| [Layout](#layout) | Simple representation of the environment | main, |
| [Setdress](#setdress) | Environment containing only referenced assets | main, |
| [Camera](#camera) | May contain trackers or proxy geo, only single camera | main, tracked, anim |
| | expected. | |
| [Animation](#animation) | Animation exported from a rig. | characterA, vehicleB |
| [Cache](#cache) | Arbitrary animated geometry or fx cache | rest, ROM , pose01 |
| MayaAscii | Maya publishes that don't fit other categories | |
| [Render](#render) | Rendered frames from CG or Comp | |
| RenderSetup | Scene render settings, AOVs and layers | |
| Plate | Ingested, transcode, conformed footage | raw, graded, imageplane |
| Write | Nuke write nodes for rendering | |
| Image | Any non-plate image to be used by artists | Reference, ConceptArt |
| LayeredImage | Software agnostic layered image with metadata | Reference, ConceptArt |
| Review | Reviewable video or image. | |
| Matchmove | Matchmoved camera, potentially with geometry, allows | main |
| | multiple cameras even with planes. | |
| Workfile | Backup of the workfile with all its content | uses the task name |
| Nukenodes | Any collection of nuke nodes | maskSetup, usefulBackdrop |
| Yeticache | Cached out yeti fur setup | |
| YetiRig | Yeti groom ready to be applied to geometry cache | main, destroyed |
| VrayProxy | Vray proxy geometry for rendering | |
| VrayScene | Vray full scene export | |
| ArnoldStandin           | All arnold .ass archives for rendering                | main, wet, dirty          |
| LUT | | |
| Nukenodes | | |
| Gizmo | | |
| Nukenodes | | |
| Harmony.template | | |
| Harmony.palette | | |
@ -161,7 +163,7 @@ Example Representations:
### Animation
Published result of an animation created with a rig. Animation can be extracted
as animation curves, cached out geometry or even fully animated rig with all the controllers.
as animation curves, cached out geometry or even fully animated rig with all the controllers.
Animation cache is usually defined by a rigger in the rig file of a character or
by FX TD in the effects rig, to ensure consistency of outputs.

View file

@ -189,7 +189,7 @@ A profile may generate multiple outputs from a single input. Each output must de
- Profile filtering defines which group of output definitions is used but output definitions may require more specific filters on their own.
- They may filter by subset name (regex can be used) or publish families. Publish families are more complex as are based on knowing code base.
- Filtering by custom tags -> this is used for targeting to output definitions from other extractors using settings (at this moment only Nuke bake extractor can target using custom tags).
- Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
- Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewIntermediates/outputs/baking/add_custom_tags`
- Filtering by input length. Input may be video, sequence or single image. It is possible that `.mp4` should be created only when input is video or sequence and to create review `.png` when input is single frame. In some cases the output should be created even if it's single frame or multi frame input.

View file

@ -534,8 +534,7 @@ Plugin responsible for generating thumbnails with colorspace controlled by Nuke.
}
```
### `ExtractReviewDataMov`
### `ExtractReviewIntermediates`
`viewer_lut_raw` **true** will publish the baked mov file without any colorspace conversion. It will be baked with the workfile workspace. This can happen in case the Viewer input process uses baked screen space luts.
#### baking with controlled colorspace