mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-24 21:04:40 +01:00
[Automated] Merged develop into main
commit 5877dcd93c
23 changed files with 621 additions and 82 deletions

@@ -9,4 +9,4 @@ repos:
       - id: check-yaml
       - id: check-added-large-files
       - id: no-commit-to-branch
-        args: [ '--pattern', '^(?!((enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ]
+        args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ]
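
The `no-commit-to-branch` hook blocks any branch name that matches the pattern, i.e. any name that does not follow the `<type>/<name>` convention; the change adds `release/` as an allowed type. A quick `re` check of what the new pattern blocks and allows:

```python
import re

# Branch names matching this pattern are blocked; the negative lookahead
# means "anything that is NOT <type>/<alnum-dash-name>".
pattern = re.compile(
    r"^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)"
    r"\/[a-zA-Z0-9\-]+)$).*"
)

assert pattern.match("main")                        # blocked: no type prefix
assert pattern.match("release_3-15")                # blocked: wrong separator
assert pattern.match("release/3-15-0") is None      # now allowed
assert pattern.match("bugfix/fix-render") is None   # allowed as before
```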

@@ -1,6 +1,6 @@
 import json
 import pyblish.api
-from openpype.hosts.aftereffects.api import list_instances
+from openpype.hosts.aftereffects.api import AfterEffectsHost


 class PreCollectRender(pyblish.api.ContextPlugin):

@@ -25,7 +25,7 @@ class PreCollectRender(pyblish.api.ContextPlugin):
             self.log.debug("Not applicable for New Publisher, skip")
             return

-        for inst in list_instances():
+        for inst in AfterEffectsHost().list_instances():
             if inst.get("creator_attributes"):
                 raise ValueError("Instance created in New publisher, "
                                  "cannot be published in Pyblish.\n"
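
The guard after the swapped call is worth seeing in isolation: any instance carrying `creator_attributes` was made by the New Publisher and must not pass through the legacy Pyblish path. A minimal sketch with hypothetical instance data (printing instead of raising, so it runs through):

```python
# Hypothetical instance dicts as list_instances() would return them.
instances = [
    {"subset": "renderMain", "creator_attributes": {"farm": True}},
    {"subset": "reviewMain"},
]

for inst in instances:
    # New Publisher instances carry "creator_attributes"; legacy ones do not.
    if inst.get("creator_attributes"):
        print("would raise ValueError for:", inst["subset"])
# would raise ValueError for: renderMain
```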

@@ -1,5 +1,4 @@
 import pyblish.api
-import argparse
 import sys
 from pprint import pformat


@@ -11,20 +10,40 @@ class CollectCelactionCliKwargs(pyblish.api.Collector):
     order = pyblish.api.Collector.order - 0.1

     def process(self, context):
-        parser = argparse.ArgumentParser(prog="celaction")
-        parser.add_argument("--currentFile",
-                            help="Pass file to Context as `currentFile`")
-        parser.add_argument("--chunk",
-                            help=("Render chanks on farm"))
-        parser.add_argument("--frameStart",
-                            help=("Start of frame range"))
-        parser.add_argument("--frameEnd",
-                            help=("End of frame range"))
-        parser.add_argument("--resolutionWidth",
-                            help=("Width of resolution"))
-        parser.add_argument("--resolutionHeight",
-                            help=("Height of resolution"))
-        passing_kwargs = parser.parse_args(sys.argv[1:]).__dict__
+        args = list(sys.argv[1:])
+        self.log.info(str(args))
+        missing_kwargs = []
+        passing_kwargs = {}
+        for key in (
+            "chunk",
+            "frameStart",
+            "frameEnd",
+            "resolutionWidth",
+            "resolutionHeight",
+            "currentFile",
+        ):
+            arg_key = f"--{key}"
+            if arg_key not in args:
+                missing_kwargs.append(key)
+                continue
+            arg_idx = args.index(arg_key)
+            args.pop(arg_idx)
+            if key != "currentFile":
+                value = args.pop(arg_idx)
+            else:
+                path_parts = []
+                while arg_idx < len(args):
+                    path_parts.append(args.pop(arg_idx))
+                value = " ".join(path_parts).strip('"')
+
+            passing_kwargs[key] = value
+
+        if missing_kwargs:
+            raise RuntimeError("Missing arguments {}".format(
+                ", ".join(
+                    [f'"{key}"' for key in missing_kwargs]
+                )
+            ))

         self.log.info("Storing kwargs ...")
         self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))
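
For context, a standalone sketch of the new hand-rolled parsing (argv values hypothetical): the `--currentFile` branch consumes every remaining token, which is why an unquoted path containing spaces survives where argparse would split it.

```python
# Hypothetical argv as celaction would pass it; the path contains a space.
argv = ["--chunk", "10", "--currentFile", "C:/projects/My", "Scene.scn"]

args = list(argv)
arg_idx = args.index("--currentFile")
args.pop(arg_idx)

# Everything after the flag is treated as parts of one file path.
path_parts = []
while arg_idx < len(args):
    path_parts.append(args.pop(arg_idx))
value = " ".join(path_parts).strip('"')

print(value)  # C:/projects/My Scene.scn
```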

@@ -144,13 +144,20 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

         """
         obj_network = hou.node("/obj")
-        op_ctx = obj_network.createNode(
-            "null", node_name="OpenPypeContext")
+        op_ctx = obj_network.createNode("null", node_name="OpenPypeContext")
+
+        # A null in houdini by default comes with content inside to visualize
+        # the null. However since we explicitly want to hide the node lets
+        # remove the content and disable the display flag of the node
+        for node in op_ctx.children():
+            node.destroy()
+
         op_ctx.moveToGoodPosition()
         op_ctx.setBuiltExplicitly(False)
         op_ctx.setCreatorState("OpenPype")
         op_ctx.setComment("OpenPype node to hold context metadata")
         op_ctx.setColor(hou.Color((0.081, 0.798, 0.810)))
         op_ctx.setDisplayFlag(False)
+        op_ctx.hide(True)
         return op_ctx

@@ -103,9 +103,8 @@ class HoudiniCreatorBase(object):
         fill it with all collected instances from the scene under its
         respective creator identifiers.

-        If legacy instances are detected in the scene, create
-        `houdini_cached_legacy_subsets` there and fill it with
-        all legacy subsets under family as a key.
+        Create `houdini_cached_legacy_subsets` key for any legacy instances
+        detected in the scene as instances per family.

         Args:
             Dict[str, Any]: Shared data.

@@ -114,30 +113,31 @@ class HoudiniCreatorBase(object):
             Dict[str, Any]: Shared data dictionary.

         """
-        if shared_data.get("houdini_cached_subsets") is None:
-            shared_data["houdini_cached_subsets"] = {}
-        if shared_data.get("houdini_cached_legacy_subsets") is None:
-            shared_data["houdini_cached_legacy_subsets"] = {}
-        cached_instances = lsattr("id", "pyblish.avalon.instance")
-        for i in cached_instances:
-            if not i.parm("creator_identifier"):
-                # we have legacy instance
-                family = i.parm("family").eval()
-                if family not in shared_data[
-                        "houdini_cached_legacy_subsets"]:
-                    shared_data["houdini_cached_legacy_subsets"][
-                        family] = [i]
-                else:
-                    shared_data[
-                        "houdini_cached_legacy_subsets"][family].append(i)
-                continue
+        if shared_data.get("houdini_cached_subsets") is not None:
+            cache = dict()
+            cache_legacy = dict()
+
+            for node in lsattr("id", "pyblish.avalon.instance"):
+
+                creator_identifier_parm = node.parm("creator_identifier")
+                if creator_identifier_parm:
+                    # creator instance
+                    creator_id = creator_identifier_parm.eval()
+                    cache.setdefault(creator_id, []).append(node)
-            creator_id = i.parm("creator_identifier").eval()
-            if creator_id not in shared_data["houdini_cached_subsets"]:
-                shared_data["houdini_cached_subsets"][creator_id] = [i]
-            else:
-                shared_data[
-                    "houdini_cached_subsets"][creator_id].append(i)  # noqa
+                # legacy instance
+                family_parm = node.parm("family")
+                if not family_parm:
+                    # must be a broken instance
+                    continue
+
+                family = family_parm.eval()
+                cache_legacy.setdefault(family, []).append(node)
+
+            shared_data["houdini_cached_subsets"] = cache
+            shared_data["houdini_cached_legacy_subsets"] = cache_legacy
+
         return shared_data

     @staticmethod
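
The rewrite above replaces the key-by-key dict bookkeeping with the `dict.setdefault()` grouping idiom. A minimal sketch of that pattern, with plain dicts standing in for Houdini nodes (data hypothetical):

```python
# Hypothetical stand-ins for scene nodes returned by lsattr().
nodes = [
    {"creator_identifier": "io.openpype.creators.houdini.pointcache"},
    {"family": "camera"},
    {"creator_identifier": "io.openpype.creators.houdini.pointcache"},
]

cache = {}
cache_legacy = {}
for node in nodes:
    creator_id = node.get("creator_identifier")
    if creator_id:
        # setdefault creates the list on first use, then appends.
        cache.setdefault(creator_id, []).append(node)
    else:
        cache_legacy.setdefault(node["family"], []).append(node)

print(len(cache["io.openpype.creators.houdini.pointcache"]))  # 2
print(list(cache_legacy))  # ['camera']
```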

@@ -14,3 +14,10 @@ class MaxAddon(OpenPypeModule, IHostAddon):

     def get_workfile_extensions(self):
         return [".max"]
+
+    def get_launch_hook_paths(self, app):
+        if app.host_name != self.host_name:
+            return []
+        return [
+            os.path.join(MAX_HOST_DIR, "hooks")
+        ]

openpype/hosts/max/hooks/inject_python.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+"""Pre-launch hook to inject python environment."""
+from openpype.lib import PreLaunchHook
+import os
+
+
+class InjectPythonPath(PreLaunchHook):
+    """Inject OpenPype environment to 3dsmax.
+
+    Note that this works in combination whit 3dsmax startup script that
+    is translating it back to PYTHONPATH for cases when 3dsmax drops PYTHONPATH
+    environment.
+
+    Hook `GlobalHostDataHook` must be executed before this hook.
+    """
+    app_groups = ["3dsmax"]
+
+    def execute(self):
+        self.launch_context.env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]
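
A conceptual sketch of that PYTHONPATH round-trip (a plain dict stands in for `launch_context.env`; values hypothetical): the hook stashes PYTHONPATH under a name 3dsmax will not strip, and the startup script restores it inside the application.

```python
import os

# Before launch: stash PYTHONPATH under a name 3dsmax leaves alone.
os.environ["PYTHONPATH"] = "/pipeline/openpype"  # hypothetical value
launch_env = {}                                  # stand-in for launch_context.env
launch_env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]

# Inside 3dsmax, the MaxScript startup (next hunk) performs this restore:
os.environ["PYTHONPATH"] = launch_env["MAX_PYTHONPATH"]
```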

@@ -2,8 +2,11 @@
 (
     local sysPath = dotNetClass "System.IO.Path"
     local sysDir = dotNetClass "System.IO.Directory"
-    local localScript = getThisScriptFilename()
+    local localScript = getThisScriptFilename()
     local startup = sysPath.Combine (sysPath.GetDirectoryName localScript) "startup.py"

+    local pythonpath = systemTools.getEnvVariable "MAX_PYTHONPATH"
+    systemTools.setEnvVariable "PYTHONPATH" pythonpath
+
     python.ExecuteFile startup
 )

@@ -1132,6 +1132,7 @@ class RenderProductsRenderman(ARenderProducts):
     """

     renderer = "renderman"
+    unmerged_aovs = {"PxrCryptomatte"}

     def get_render_products(self):
         """Get all AOVs.

@@ -1181,6 +1182,17 @@ class RenderProductsRenderman(ARenderProducts):
             if not display_types.get(display["driverNode"]["type"]):
                 continue

+            has_cryptomatte = cmds.ls(type=self.unmerged_aovs)
+            matte_enabled = False
+            if has_cryptomatte:
+                for cryptomatte in has_cryptomatte:
+                    cryptomatte_aov = cryptomatte
+                    matte_name = "cryptomatte"
+                    rman_globals = cmds.listConnections(cryptomatte +
+                                                        ".message")
+                    if rman_globals:
+                        matte_enabled = True
+
             aov_name = name
             if aov_name == "rmanDefaultDisplay":
                 aov_name = "beauty"

@@ -1199,6 +1211,15 @@ class RenderProductsRenderman(ARenderProducts):
                 camera=camera,
                 multipart=True
             )
+
+            if has_cryptomatte and matte_enabled:
+                cryptomatte = RenderProduct(
+                    productName=matte_name,
+                    aov=cryptomatte_aov,
+                    ext=extensions,
+                    camera=camera,
+                    multipart=True
+                )
             else:
                 # this code should handle the case where no multipart
                 # capable format is selected. But since it involves

@@ -1218,6 +1239,9 @@ class RenderProductsRenderman(ARenderProducts):

             products.append(product)

+            if has_cryptomatte and matte_enabled:
+                products.append(cryptomatte)
+
         return products

     def get_files(self, product):
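
A small sketch of the enablement test added above: a PxrCryptomatte node counts as active when its `.message` attribute has any connection. A plain dict stands in for `cmds.listConnections`; node names hypothetical.

```python
# Stand-in for the Maya scene: "node.attribute" -> list of connections.
connections = {"PxrCryptomatte1.message": ["rmanGlobals"]}
has_cryptomatte = ["PxrCryptomatte1"]

matte_enabled = False
for cryptomatte in has_cryptomatte:
    # Any outgoing connection from .message means the filter is wired up.
    if connections.get(cryptomatte + ".message"):
        matte_enabled = True

print(matte_enabled)  # True
```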

@@ -22,17 +22,25 @@ class RenderSettings(object):
     _image_prefix_nodes = {
         'vray': 'vraySettings.fileNamePrefix',
         'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'rmanGlobals.imageFileFormat',
         'redshift': 'defaultRenderGlobals.imageFilePrefix'
     }

     _image_prefixes = {
         'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"],  # noqa
         'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"],  # noqa
-        'renderman': '<Scene>/<layer>/<layer>{aov_separator}<aov>',
+        'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_prefix"],  # noqa
         'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"]  # noqa
     }

+    # Renderman only
+    _image_dir = {
+        'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_dir"],  # noqa
+        'cryptomatte': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["cryptomatte_dir"],  # noqa
+        'imageDisplay': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["imageDisplay_dir"],  # noqa
+        "watermark": get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["watermark_dir"]  # noqa
+    }
+
     _aov_chars = {
         "dot": ".",
         "dash": "-",

@@ -81,7 +89,6 @@ class RenderSettings(object):
                 prefix, type="string")  # noqa
         else:
             print("{0} isn't a supported renderer to autoset settings.".format(renderer))  # noqa
-
         # TODO: handle not having res values in the doc
         width = asset_doc["data"].get("resolutionWidth")
         height = asset_doc["data"].get("resolutionHeight")

@@ -97,6 +104,13 @@ class RenderSettings(object):
             self._set_redshift_settings(width, height)
             mel.eval("redshiftUpdateActiveAovList")

+        if renderer == "renderman":
+            image_dir = self._image_dir["renderman"]
+            cmds.setAttr("rmanGlobals.imageOutputDir",
+                         image_dir, type="string")
+            self._set_renderman_settings(width, height,
+                                         aov_separator)
+
     def _set_arnold_settings(self, width, height):
         """Sets settings for Arnold."""
         from mtoa.core import createOptions  # noqa

@@ -202,6 +216,66 @@ class RenderSettings(object):
         cmds.setAttr("defaultResolution.height", height)
         self._additional_attribs_setter(additional_options)

+    def _set_renderman_settings(self, width, height, aov_separator):
+        """Sets settings for Renderman"""
+        rman_render_presets = (
+            self._project_settings
+            ["maya"]
+            ["RenderSettings"]
+            ["renderman_renderer"]
+        )
+        display_filters = rman_render_presets["display_filters"]
+        d_filters_number = len(display_filters)
+        for i in range(d_filters_number):
+            d_node = cmds.ls(typ=display_filters[i])
+            if len(d_node) > 0:
+                filter_nodes = d_node[0]
+            else:
+                filter_nodes = cmds.createNode(display_filters[i])
+
+            cmds.connectAttr(filter_nodes + ".message",
+                             "rmanGlobals.displayFilters[%i]" % i,
+                             force=True)
+            if filter_nodes.startswith("PxrImageDisplayFilter"):
+                imageDisplay_dir = self._image_dir["imageDisplay"]
+                imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}",
+                                                            aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             imageDisplay_dir, type="string")
+
+        sample_filters = rman_render_presets["sample_filters"]
+        s_filters_number = len(sample_filters)
+        for n in range(s_filters_number):
+            s_node = cmds.ls(typ=sample_filters[n])
+            if len(s_node) > 0:
+                filter_nodes = s_node[0]
+            else:
+                filter_nodes = cmds.createNode(sample_filters[n])
+
+            cmds.connectAttr(filter_nodes + ".message",
+                             "rmanGlobals.sampleFilters[%i]" % n,
+                             force=True)
+
+            if filter_nodes.startswith("PxrCryptomatte"):
+                matte_dir = self._image_dir["cryptomatte"]
+                matte_dir = matte_dir.replace("{aov_separator}",
+                                              aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             matte_dir, type="string")
+            elif filter_nodes.startswith("PxrWatermarkFilter"):
+                watermark_dir = self._image_dir["watermark"]
+                watermark_dir = watermark_dir.replace("{aov_separator}",
+                                                      aov_separator)
+                cmds.setAttr(filter_nodes + ".filename",
+                             watermark_dir, type="string")
+
+        additional_options = rman_render_presets["additional_options"]
+
+        self._set_global_output_settings()
+        cmds.setAttr("defaultResolution.width", width)
+        cmds.setAttr("defaultResolution.height", height)
+        self._additional_attribs_setter(additional_options)
+
     def _set_vray_settings(self, aov_separator, width, height):
         # type: (str, int, int) -> None
         """Sets important settings for Vray."""
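
The `{aov_separator}` token in the directory templates above is substituted with a plain `str.replace()`; a quick standalone illustration (separator value hypothetical, chosen in project settings):

```python
imageDisplay_dir = "<imagedir>/<layer>{aov_separator}imageDisplayFilter.<f4>.<ext>"
aov_separator = "_"  # hypothetical separator from the settings

print(imageDisplay_dir.replace("{aov_separator}", aov_separator))
# <imagedir>/<layer>_imageDisplayFilter.<f4>.<ext>
```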

@@ -0,0 +1,52 @@
+import os
+
+from maya import cmds
+
+import pyblish.api
+
+from openpype.pipeline.publish import ValidateContentsOrder
+
+
+class ValidatePluginPathAttributes(pyblish.api.InstancePlugin):
+    """
+    Validate plug-in path attributes point to existing file paths.
+    """
+
+    order = ValidateContentsOrder
+    hosts = ['maya']
+    families = ["workfile"]
+    label = "Plug-in Path Attributes"
+
+    def get_invalid(self, instance):
+        invalid = list()
+
+        # get the project setting
+        validate_path = (
+            instance.context.data["project_settings"]["maya"]["publish"]
+        )
+        file_attr = validate_path["ValidatePluginPathAttributes"]["attribute"]
+        if not file_attr:
+            return invalid
+
+        # get the nodes and file attributes
+        for node, attr in file_attr.items():
+            # check the related nodes
+            targets = cmds.ls(type=node)
+
+            for target in targets:
+                # get the filepath
+                file_attr = "{}.{}".format(target, attr)
+                filepath = cmds.getAttr(file_attr)
+
+                if filepath and not os.path.exists(filepath):
+                    self.log.error("File {0} not exists".format(filepath))  # noqa
+                    invalid.append(target)
+
+        return invalid
+
+    def process(self, instance):
+        """Process all directories Set as Filenames in Non-Maya Nodes"""
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise RuntimeError("Non-existent Path "
+                               "found: {0}".format(invalid))
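
The core check boils down to: for each configured node type, read its file attribute and flag paths that do not exist on disk. A standalone sketch with plain dicts standing in for `cmds` (scene data hypothetical):

```python
import os

# node type -> file attribute, as configured in project settings
file_attr = {"AlembicNode": "abc_File", "VRayProxy": "fileName"}

# hypothetical scene: node name -> {type, attributes}
scene = {
    "AlembicNode1": {"type": "AlembicNode", "abc_File": "/missing/cache.abc"},
}

invalid = []
for node_type, attr in file_attr.items():
    for name, node in scene.items():
        if node["type"] != node_type:
            continue
        filepath = node.get(attr)
        # Only a set but non-existent path is invalid; empty attrs pass.
        if filepath and not os.path.exists(filepath):
            invalid.append(name)

print(invalid)  # ['AlembicNode1']
```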

@@ -1,10 +1,12 @@
 import os
+import shutil

 import pyblish.api
 import clique
 import nuke

 from openpype.pipeline import publish
+from openpype.lib import collect_frames


 class NukeRenderLocal(publish.ExtractorColormanaged):

@@ -13,6 +15,8 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
     Extract the result of savers by starting a comp render
     This will run the local render of Fusion.

+    Allows to use last published frames and overwrite only specific ones
+    (set in instance.data.get("frames_to_fix"))
     """

     order = pyblish.api.ExtractorOrder

@@ -21,7 +25,6 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
     families = ["render.local", "prerender.local", "still.local"]

     def process(self, instance):
-        families = instance.data["families"]
         child_nodes = (
             instance.data.get("transientData", {}).get("childNodes")
             or instance

@@ -32,17 +35,16 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
             if x.Class() == "Write":
                 node = x

         self.log.debug("instance collected: {}".format(instance.data))

-        node_subset_name = instance.data.get("name", None)
         first_frame = instance.data.get("frameStartHandle", None)
         last_frame = instance.data.get("frameEndHandle", None)
+        node_subset_name = instance.data["subset"]

-        self.log.info("Starting render")
-        self.log.info("Start frame: {}".format(first_frame))
-        self.log.info("End frame: {}".format(last_frame))
+        filenames = []
         node_file = node["file"]
-        # Collecte expected filepaths for each frame
+        # Collect expected filepaths for each frame
         # - for cases that output is still image is first created set of
         #   paths which is then sorted and converted to list
         expected_paths = list(sorted({

@@ -50,22 +52,37 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
             for frame in range(first_frame, last_frame + 1)
         }))
         # Extract only filenames for representation
-        filenames = [
+        filenames.extend([
             os.path.basename(filepath)
             for filepath in expected_paths
-        ]
+        ])

         # Ensure output directory exists.
         out_dir = os.path.dirname(expected_paths[0])
         if not os.path.exists(out_dir):
             os.makedirs(out_dir)

-        # Render frames
-        nuke.execute(
-            str(node_subset_name),
-            int(first_frame),
-            int(last_frame)
-        )
+        frames_to_render = [(first_frame, last_frame)]
+
+        frames_to_fix = instance.data.get("frames_to_fix")
+        if instance.data.get("last_version_published_files") and frames_to_fix:
+            frames_to_render = self._get_frames_to_render(frames_to_fix)
+            anatomy = instance.context.data["anatomy"]
+            self._copy_last_published(anatomy, instance, out_dir,
+                                      filenames)
+
+        for render_first_frame, render_last_frame in frames_to_render:
+
+            self.log.info("Starting render")
+            self.log.info("Start frame: {}".format(render_first_frame))
+            self.log.info("End frame: {}".format(render_last_frame))
+
+            # Render frames
+            nuke.execute(
+                str(node_subset_name),
+                int(render_first_frame),
+                int(render_last_frame)
+            )

         ext = node["file_type"].value()
         colorspace = node["colorspace"].value()
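
The tuples driving the new render loop come from `_get_frames_to_render`, added in the hunks below. A standalone re-implementation shows its output for the docstring's example; note the original keeps single frames as strings and relies on the later `int()` casts, while this sketch normalizes to int up front.

```python
def get_frames_to_render(frames_to_fix):
    """Parse '1005,1009-1010' into [(1005, 1005), (1009, 1010)]."""
    frames_to_render = []
    for frame_range in frames_to_fix.split(","):
        if frame_range.isdigit():
            frames_to_render.append((int(frame_range), int(frame_range)))
        elif "-" in frame_range:
            first, last = frame_range.split("-")
            frames_to_render.append((int(first), int(last)))
        else:
            raise ValueError(
                "Wrong format of frames to fix {}".format(frames_to_fix))
    return frames_to_render

print(get_frames_to_render("1005,1009-1010"))
# [(1005, 1005), (1009, 1010)]
```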

@@ -106,6 +123,7 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
             out_dir
         ))

+        families = instance.data["families"]
         # redefinition of families
         if "render.local" in families:
             instance.data['family'] = 'render'

@@ -133,3 +151,58 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
         self.log.info('Finished render')

         self.log.debug("_ instance.data: {}".format(instance.data))
+
+    def _copy_last_published(self, anatomy, instance, out_dir,
+                             expected_filenames):
+        """Copies last published files to temporary out_dir.
+
+        These are base of files which will be extended/fixed for specific
+        frames.
+        Renames published file to expected file name based on frame, eg.
+        test_project_test_asset_subset_v005.1001.exr > new_render.1001.exr
+        """
+        last_published = instance.data["last_version_published_files"]
+        last_published_and_frames = collect_frames(last_published)
+
+        expected_and_frames = collect_frames(expected_filenames)
+        frames_and_expected = {v: k for k, v in expected_and_frames.items()}
+        for file_path, frame in last_published_and_frames.items():
+            file_path = anatomy.fill_root(file_path)
+            if not os.path.exists(file_path):
+                continue
+            target_file_name = frames_and_expected.get(frame)
+            if not target_file_name:
+                continue
+
+            out_path = os.path.join(out_dir, target_file_name)
+            self.log.debug("Copying '{}' -> '{}'".format(file_path, out_path))
+            shutil.copy(file_path, out_path)
+
+            # TODO shouldn't this be uncommented
+            # instance.context.data["cleanupFullPaths"].append(out_path)
+
+    def _get_frames_to_render(self, frames_to_fix):
+        """Return list of frame range tuples to render
+
+        Args:
+            frames_to_fix (str): specific or range of frames to be rerendered
+                (1005, 1009-1010)
+        Returns:
+            (list): [(1005, 1005), (1009-1010)]
+        """
+        frames_to_render = []
+
+        for frame_range in frames_to_fix.split(","):
+            if frame_range.isdigit():
+                render_first_frame = frame_range
+                render_last_frame = frame_range
+            elif '-' in frame_range:
+                frames = frame_range.split('-')
+                render_first_frame = int(frames[0])
+                render_last_frame = int(frames[1])
+            else:
+                raise ValueError("Wrong format of frames to fix {}"
+                                 .format(frames_to_fix))
+            frames_to_render.append((render_first_frame,
+                                     render_last_frame))
+        return frames_to_render
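
The rename step in `_copy_last_published` hinges on inverting a filename-to-frame mapping. A sketch with hard-coded stand-ins for what `openpype.lib.collect_frames` returns (file names hypothetical):

```python
# filename -> frame, as collect_frames would return it
last_published_and_frames = {
    "test_project_test_asset_subset_v005.1001.exr": "1001",
    "test_project_test_asset_subset_v005.1002.exr": "1002",
}
expected_and_frames = {
    "new_render.1001.exr": "1001",
    "new_render.1002.exr": "1002",
}

# Invert to frame -> expected filename, then pair old files by frame.
frames_and_expected = {v: k for k, v in expected_and_frames.items()}
for file_name, frame in last_published_and_frames.items():
    print(file_name, "->", frames_and_expected.get(frame))
# test_project_test_asset_subset_v005.1001.exr -> new_render.1001.exr
# test_project_test_asset_subset_v005.1002.exr -> new_render.1002.exr
```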

@@ -189,6 +189,6 @@ class FileTransaction(object):
     def _same_paths(self, src, dst):
         # handles same paths but with C:/project vs c:/project
         if os.path.exists(src) and os.path.exists(dst):
             return os.path.samefile(src, dst)
-        return os.stat(src) == os.stat(dst)
+        return src == dst
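
The fallback matters because both `os.path.samefile` and `os.stat` raise when a path is missing; after the change, non-existent paths degrade to a plain string comparison. A quick illustration (paths hypothetical):

```python
import os

src = "/tmp/definitely-missing.exr"  # hypothetical, does not exist
dst = "/tmp/definitely-missing.exr"

if os.path.exists(src) and os.path.exists(dst):
    same = os.path.samefile(src, dst)  # resolves C:/project vs c:/project
else:
    same = src == dst                  # no os.stat(), so no FileNotFoundError

print(same)  # True
```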

@@ -344,9 +344,9 @@ def get_imageio_config(
     imageio_global, imageio_host = _get_imageio_settings(
         project_settings, host_name)

-    config_host = imageio_host["ocio_config"]
+    config_host = imageio_host.get("ocio_config", {})

-    if config_host["enabled"]:
+    if config_host.get("enabled"):
         config_data = _get_config_data(
             config_host["filepath"], anatomy_data
         )
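
The switch to `.get()` lets hosts without an `ocio_config` section degrade gracefully instead of raising `KeyError`; a minimal illustration (settings data hypothetical):

```python
imageio_host = {}  # host settings with no "ocio_config" section

# old behaviour: imageio_host["ocio_config"] -> KeyError
config_host = imageio_host.get("ocio_config", {})  # new: empty dict

if config_host.get("enabled"):
    print("would resolve", config_host["filepath"])
else:
    print("no host OCIO config, falling through")  # safe default
```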

@@ -372,6 +372,12 @@ class ExtractorColormanaged(Extractor):
         ```

         """
+        ext = representation["ext"]
+        # check extension
+        self.log.debug("__ ext: `{}`".format(ext))
+        if ext.lower() not in self.allowed_ext:
+            return
+
         if colorspace_settings is None:
             colorspace_settings = self.get_colorspace_settings(context)

@@ -386,12 +392,6 @@ class ExtractorColormanaged(Extractor):
         self.log.info("Config data is : `{}`".format(
             config_data))

-        ext = representation["ext"]
-        # check extension
-        self.log.debug("__ ext: `{}`".format(ext))
-        if ext.lower() not in self.allowed_ext:
-            return
-
         project_name = context.data["projectName"]
         host_name = context.data["hostName"]
         project_settings = context.data["project_settings"]

openpype/plugins/publish/collect_frames_fix.py (new file, 80 lines)
@@ -0,0 +1,80 @@
+import pyblish.api
+from openpype.lib.attribute_definitions import (
+    TextDef,
+    BoolDef
+)
+
+from openpype.pipeline.publish import OpenPypePyblishPluginMixin
+from openpype.client.entities import (
+    get_last_version_by_subset_name,
+    get_representations
+)
+
+
+class CollectFramesFixDef(
+    pyblish.api.InstancePlugin,
+    OpenPypePyblishPluginMixin
+):
+    """Provides text field to insert frame(s) to be rerendered.
+
+    Published files of last version of an instance subset are collected into
+    instance.data["last_version_published_files"]. All these but frames
+    mentioned in text field will be reused for new version.
+    """
+    order = pyblish.api.CollectorOrder + 0.495
+    label = "Collect Frames to Fix"
+    targets = ["local"]
+    hosts = ["nuke"]
+    families = ["render", "prerender"]
+    enabled = True
+
+    def process(self, instance):
+        attribute_values = self.get_attr_values_from_data(instance.data)
+        frames_to_fix = attribute_values.get("frames_to_fix")
+        rewrite_version = attribute_values.get("rewrite_version")
+
+        if frames_to_fix:
+            instance.data["frames_to_fix"] = frames_to_fix
+
+            subset_name = instance.data["subset"]
+            asset_name = instance.data["asset"]
+
+            project_entity = instance.data["projectEntity"]
+            project_name = project_entity["name"]
+
+            version = get_last_version_by_subset_name(project_name,
+                                                      subset_name,
+                                                      asset_name=asset_name)
+            if not version:
+                self.log.warning("No last version found, "
+                                 "re-render not possible")
+                return
+
+            representations = get_representations(project_name,
+                                                  version_ids=[version["_id"]])
+            published_files = []
+            for repre in representations:
+                if repre["context"]["family"] not in self.families:
+                    continue
+
+                for file_info in repre.get("files"):
+                    published_files.append(file_info["path"])
+
+            instance.data["last_version_published_files"] = published_files
+            self.log.debug("last_version_published_files::{}".format(
+                instance.data["last_version_published_files"]))
+
+            if rewrite_version:
+                instance.data["version"] = version["name"]
+                # limits triggering version validator
+                instance.data.pop("latestVersion")
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            TextDef("frames_to_fix", label="Frames to fix",
+                    placeholder="5,10-15",
+                    regex="[0-9,-]+"),
+            BoolDef("rewrite_version", label="Rewrite latest version",
+                    default=False),
+        ]
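
The `regex="[0-9,-]+"` on the TextDef constrains what artists can type into the "Frames to fix" field; checking the same pattern with `re` (assuming the widget validates the whole string):

```python
import re

pattern = re.compile(r"[0-9,-]+")
for text in ("5,10-15", "1005", "5;10"):
    print(text, bool(pattern.fullmatch(text)))
# 5,10-15 True
# 1005 True
# 5;10 False
```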

@@ -534,6 +534,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
         template_data["representation"] = repre["name"]
         template_data["ext"] = repre["ext"]

+        # allow overwriting existing version
+        template_data["version"] = version["name"]
+
         # add template data for colorspaceData
         if repre.get("colorspaceData"):
             colorspace = repre["colorspaceData"]["colorspace"]

@@ -93,6 +93,16 @@
             "force_combine": true,
             "aov_list": [],
             "additional_options": []
         },
+        "renderman_renderer": {
+            "image_prefix": "<layer>{aov_separator}<aov>.<f4>.<ext>",
+            "image_dir": "<scene>/<layer>",
+            "display_filters": [],
+            "imageDisplay_dir": "<imagedir>/<layer>{aov_separator}imageDisplayFilter.<f4>.<ext>",
+            "sample_filters": [],
+            "cryptomatte_dir": "<imagedir>/<layer>{aov_separator}cryptomatte.<f4>.<ext>",
+            "watermark_dir": "<imagedir>/<layer>{aov_separator}watermarkFilter.<f4>.<ext>",
+            "additional_options": []
+        }
     },
     "create": {

@@ -346,6 +356,45 @@
             "rig"
         ]
     },
+    "ValidatePluginPathAttributes": {
+        "enabled": true,
+        "optional": false,
+        "active": true,
+        "attribute": {
+            "AlembicNode": "abc_File",
+            "VRayProxy": "fileName",
+            "RenderManArchive": "filename",
+            "pgYetiMaya": "cacheFileName",
+            "aiStandIn": "dso",
+            "RedshiftSprite": "tex0",
+            "RedshiftBokeh": "dofBokehImage",
+            "RedshiftCameraMap": "tex0",
+            "RedshiftEnvironment": "tex2",
+            "RedshiftDomeLight": "tex1",
+            "RedshiftIESLight": "profile",
+            "RedshiftLightGobo": "tex0",
+            "RedshiftNormalMap": "tex0",
+            "RedshiftProxyMesh": "fileName",
+            "RedshiftVolumeShape": "fileName",
+            "VRayTexGLSL": "fileName",
+            "VRayMtlGLSL": "fileName",
+            "VRayVRmatMtl": "fileName",
+            "VRayPtex": "ptexFile",
+            "VRayLightIESShape": "iesFile",
+            "VRayMesh": "materialAssignmentsFile",
+            "VRayMtlOSL": "fileName",
+            "VRayTexOSL": "fileName",
+            "VRayTexOCIO": "ocioConfigFile",
+            "VRaySettingsNode": "pmap_autoSaveFile2",
+            "VRayScannedMtl": "file",
+            "VRayScene": "parameterOverrideFilePath",
+            "VRayMtlMDL": "filename",
+            "VRaySimbiont": "file",
+            "dlOpenVDBShape": "filename",
+            "pgYetiMayaShape": "liveABCFilename",
+            "gpuCache": "cacheFileName"
+        }
+    },
     "ValidateRenderSettings": {
         "arnold_render_attributes": [],
         "vray_render_attributes": [],

@@ -313,6 +313,45 @@
             }
         ]
     },
+    {
+        "type": "dict",
+        "collapsible": true,
+        "checkbox_key": "enabled",
+        "key": "ValidatePluginPathAttributes",
+        "label": "Plug-in Path Attributes",
+        "is_group": true,
+        "children": [
+            {
+                "type": "boolean",
+                "key": "enabled",
+                "label": "Enabled"
+            },
+            {
+                "type": "boolean",
+                "key": "optional",
+                "label": "Optional"
+            },
+            {
+                "type": "boolean",
+                "key": "active",
+                "label": "Active"
+            },
+            {
+                "type": "label",
+                "label": "Fill in the node types and attributes you want to validate. <p>e.g. <b>AlembicNode.abc_file</b>, the node type is <b>AlembicNode</b> and the node attribute is <b>abc_file</b>"
+            },
+            {
+                "type": "dict-modifiable",
+                "collapsible": true,
+                "key": "attribute",
+                "label": "File Attribute",
+                "use_label_wrap": true,
+                "object_type": {
+                    "type": "text"
+                }
+            }
+        ]
+    },
     {
         "type": "dict",
         "collapsible": true,

@@ -423,6 +423,93 @@
             }
         ]
     },
+    {
+        "type": "dict",
+        "collapsible": true,
+        "key": "renderman_renderer",
+        "label": "Renderman Renderer",
+        "is_group": true,
+        "children": [
+            {
+                "key": "image_prefix",
+                "label": "Image prefix template",
+                "type": "text"
+            },
+            {
+                "key": "image_dir",
+                "label": "Image Output Directory",
+                "type": "text"
+            },
+            {
+                "key": "display_filters",
+                "label": "Display Filters",
+                "type": "enum",
+                "multiselection": true,
+                "defaults": "empty",
+                "enum_items": [
+                    {"PxrBackgroundDisplayFilter": "PxrBackgroundDisplayFilter"},
+                    {"PxrCopyAOVDisplayFilter": "PxrCopyAOVDisplayFilter"},
+                    {"PxrEdgeDetect": "PxrEdgeDetect"},
+                    {"PxrFilmicTonemapperDisplayFilter": "PxrFilmicTonemapperDisplayFilter"},
+                    {"PxrGradeDisplayFilter": "PxrGradeDisplayFilter"},
+                    {"PxrHalfBufferErrorFilter": "PxrHalfBufferErrorFilter"},
+                    {"PxrImageDisplayFilter": "PxrImageDisplayFilter"},
+                    {"PxrLightSaturation": "PxrLightSaturation"},
+                    {"PxrShadowDisplayFilter": "PxrShadowDisplayFilter"},
+                    {"PxrStylizedHatching": "PxrStylizedHatching"},
+                    {"PxrStylizedLines": "PxrStylizedLines"},
+                    {"PxrStylizedToon": "PxrStylizedToon"},
+                    {"PxrWhitePointDisplayFilter": "PxrWhitePointDisplayFilter"}
+                ]
+            },
+            {
+                "key": "imageDisplay_dir",
+                "label": "Image Display Filter Directory",
+                "type": "text"
+            },
+            {
+                "key": "sample_filters",
+                "label": "Sample Filters",
+                "type": "enum",
+                "multiselection": true,
+                "defaults": "empty",
+                "enum_items": [
+                    {"PxrBackgroundSampleFilter": "PxrBackgroundSampleFilter"},
+                    {"PxrCopyAOVSampleFilter": "PxrCopyAOVSampleFilter"},
+                    {"PxrCryptomatte": "PxrCryptomatte"},
+                    {"PxrFilmicTonemapperSampleFilter": "PxrFilmicTonemapperSampleFilter"},
+                    {"PxrGradeSampleFilter": "PxrGradeSampleFilter"},
+                    {"PxrShadowFilter": "PxrShadowFilter"},
+                    {"PxrWatermarkFilter": "PxrWatermarkFilter"},
+                    {"PxrWhitePointSampleFilter": "PxrWhitePointSampleFilter"}
+                ]
+            },
+            {
+                "key": "cryptomatte_dir",
+                "label": "Cryptomatte Output Directory",
+                "type": "text"
+            },
+            {
+                "key": "watermark_dir",
+                "label": "Watermark Filter Directory",
+                "type": "text"
+            },
+            {
+                "type": "label",
+                "label": "Add additional options - put attribute and value, like <code>Ci</code>"
+            },
+            {
+                "type": "dict-modifiable",
+                "store_as_list": true,
+                "key": "additional_options",
+                "label": "Additional Renderer Options",
+                "use_label_wrap": true,
+                "object_type": {
+                    "type": "text"
+                }
+            }
+        ]
+    }
 ]

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "OpenPype"
-version = "3.15.0-nightly.1" # OpenPype
+version = "3.15.0" # OpenPype
 description = "Open VFX and Animation pipeline with support."
 authors = ["OpenPype Team <info@openpype.io>"]
 license = "MIT License"

@@ -146,10 +146,6 @@ hash = "b9950f5d2fa3720b52b8be55bacf5f56d33f9e029d38ee86534995f3d8d253d2"
 url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.20-linux-centos7.tgz"
 hash = "3894dec7e4e521463891a869586850e8605f5fd604858b674c87323bf33e273d"

-[openpype.thirdparty.oiio.darwin]
-url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz"
-hash = "sha256:..."
-
 [openpype.thirdparty.ocioconfig]
 url = "https://distribute.openpype.io/thirdparty/OpenColorIO-Configs-1.0.2.zip"
 hash = "4ac17c1f7de83465e6f51dd352d7117e07e765b66d00443257916c828e35b6ce"

@@ -130,8 +130,10 @@ def install_thirdparty(pyproject, openpype_root, platform_name):
         _print("trying to get universal url for all platforms")
         url = v.get("url")
         if not url:
-            _print("cannot get url", 1)
-            sys.exit(1)
+            _print("cannot get url for all platforms", 1)
+            _print((f"Warning: {k} is not installed for current platform "
+                    "and it might be missing in the build"), 1)
+            continue
     else:
         url = v.get(platform_name).get("url")
         destination_path = destination_path / platform_name

@@ -37,3 +37,8 @@ This functionality cannot deal with all cases and is not error proof, some inter
 ```bash
 openpype_console module kitsu push-to-zou -l me@domain.ext -p my_password
 ```
+
+## Q&A
+### Is it safe to rename an entity from Kitsu?
+Absolutely! Entities are linked by their unique IDs between the two databases.
+But renaming from the OP's Project Manager won't apply the change to Kitsu, it'll be overriden during the next synchronization.