Merge remote-tracking branch 'origin/feature/nuke-slate-prerender' into master-testing-local

# Conflicts:
#	pype/plugins/global/publish/integrate_new.py
jakub@orbi.tools 2020-01-13 18:15:14 +01:00
commit af0d4dafe3
8 changed files with 454 additions and 21 deletions

View file

@@ -63,7 +63,8 @@ class ExtractBurnin(pype.api.Extractor):
filename = "{0}".format(repre["files"])
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
ext = os.path.splitext(filename)[1]
movieFileBurnin = filename.replace(ext, "") + name + ext
full_movie_path = os.path.join(
os.path.normpath(stagingdir), repre["files"]
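The replacement above makes the burnin file name extension-agnostic instead of assuming ".mov". A minimal sketch of the same idea, with made-up file names and os.path.splitext doing the split:

import os

def burnin_name(filename, suffix="_burnin"):
    # "plate_v001.mov" -> ("plate_v001", ".mov"), then re-join with the suffix
    base, ext = os.path.splitext(filename)
    return base + suffix + ext

print(burnin_name("plate_v001.mov"))   # plate_v001_burnin.mov
print(burnin_name("plate_v001.mp4"))   # plate_v001_burnin.mp4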

View file

@@ -32,13 +32,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("frameStart")
resolution_width = instance.data.get("resolutionWidth", to_width)
resolution_height = instance.data.get("resolutionHeight", to_height)
pixel_aspect = instance.data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(instance.data["families"]))
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(inst_data["families"]))
# get representation and loop them
representations = instance.data["representations"]
representations = inst_data["representations"]
# filter out mov and img sequences
representations_new = representations[:]
@@ -224,7 +224,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("__ height_scale: `{}`".format(height_scale))
self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
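For orientation, the scaling_arg built here chains three ffmpeg filters: scale resizes the source (Lanczos filtering), pad centres the result on a black to_width x to_height canvas at the given x/y offset, and setsar=1 forces square output pixels. A small sketch with made-up numbers shows the string produced for a 4:3 source fitted into 1920x1080:

width_scale, height_scale = 1440, 1080      # 4:3 source scaled to 1080 high
width_half_pad, height_half_pad = 240, 0    # (1920 - 1440) / 2 = 240 px bars each side
to_width, to_height = 1920, 1080

scaling_arg = (
    "scale={0}x{1}:flags=lanczos,"
    "pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale, to_width, to_height,
         width_half_pad, height_half_pad)

print(scaling_arg)
# scale=1440x1080:flags=lanczos,pad=1920:1080:240:0:black,setsar=1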
@@ -279,7 +278,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args
"codec": codec_args,
"_profile": profile,
"anatomy_template": "render"
})
if repre_new.get('preview'):
repre_new.pop("preview")
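Carrying the ffmpeg preset along as "_profile" is what lets the slate extractor further down reuse the same input/codec/output arguments when it encodes its one-frame slate. Roughly, the updated representation ends up looking like the sketch below (all values are made up):

profile = {                                 # illustrative preset, not the real config
    "input": [],
    "codec": ["-codec:v libx264", "-crf 18"],
    "output": ["-pix_fmt yuv420p"],
}
repre_new = {
    "files": "shot010_comp_v001_h264.mov",  # hypothetical output name
    "tags": ["review", "slate-frame"],
    "outputName": "h264",
    "codec": profile["codec"],
    "_profile": profile,                    # reused by ExtractReviewSlate below
    "anatomy_template": "render",
}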

View file

@@ -0,0 +1,244 @@
import os
import pype.api
import pyblish.api
class ExtractReviewSlate(pype.api.Extractor):
"""
Will add a slate frame at the start of the video files.
"""
label = "Review with Slate frame"
order = pyblish.api.ExtractorOrder + 0.031
families = ["slate"]
hosts = ["nuke", "maya", "shell"]
optional = True
def process(self, instance):
inst_data = instance.data
if "representations" not in inst_data:
raise RuntimeError("Slate extraction needs an already created mov to work on.")
suffix = "_slate"
slate_path = inst_data.get("slateFrame")
ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
to_width = 1920
to_height = 1080
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
ext = os.path.splitext(input_file)[1]
output_file = input_file.replace(ext, "") + suffix + ext
input_path = os.path.join(
os.path.normpath(stagingdir), repre["files"])
self.log.debug("__ input_path: {}".format(input_path))
_remove_at_end.append(input_path)
output_path = os.path.join(
os.path.normpath(stagingdir), output_file)
self.log.debug("__ output_path: {}".format(output_path))
input_args = []
output_args = []
# overwrite output file if it already exists
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
# make sure colors are correct
output_args.extend([
"-vf scale=out_color_matrix=bt709",
"-color_primaries bt709",
"-color_trc bt709",
"-colorspace bt709"
])
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
_remove_at_end.append(slate_v_path)
slate_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
slate_subprcs_cmd = " ".join(slate_args)
# run slate generation subprocess
self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd))
slate_output = pype.api.subprocess(slate_subprcs_cmd)
self.log.debug("Slate Output: {}".format(slate_output))
# create ffmpeg concat text file path
conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt"
conc_text_path = os.path.join(
os.path.normpath(stagingdir), conc_text_file)
_remove_at_end.append(conc_text_path)
self.log.debug("__ conc_text_path: {}".format(conc_text_path))
new_line = "\n"
with open(conc_text_path, "w") as conc_text_f:
conc_text_f.writelines([
"file {}".format(
slate_v_path.replace("\\", "/")),
new_line,
"file {}".format(input_path.replace("\\", "/"))
])
# concat slate and videos together
conc_input_args = ["-y", "-f concat", "-safe 0"]
conc_input_args.append("-i {}".format(conc_text_path))
conc_output_args = ["-c copy"]
conc_output_args.append(output_path)
concat_args = [
ffmpeg_path,
" ".join(conc_input_args),
" ".join(conc_output_args)
]
concat_subprcs_cmd = " ".join(concat_args)
# ffmpeg concat subprocess
self.log.debug("Executing concat: {}".format(concat_subprcs_cmd))
concat_output = pype.api.subprocess(concat_subprcs_cmd)
self.log.debug("Output concat: {}".format(concat_output))
self.log.debug("__ repre[tags]: {}".format(repre["tags"]))
repre_update = {
"files": output_file,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"],
"anatomy_template": "render"
}
inst_data["representations"][i].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
# removing temp files
for f in _remove_at_end:
os.remove(f)
self.log.debug("Removed: `{}`".format(f))
# Remove any representations tagged for deletion.
for repre in list(inst_data.get("representations", [])):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
inst_data["representations"].remove(repre)
self.log.debug(inst_data["representations"])
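Taken together, process() is a two-step ffmpeg pipeline: the slate PNG is looped for a single frame and encoded with the same preset arguments as the review movie, then the two clips are joined losslessly via the concat demuxer. A hedged sketch of the equivalent calls, with made-up paths and codec flags:

import subprocess

slate_png = "staging/shot010_slate.png"        # hypothetical paths
review_mov = "staging/shot010_h264.mov"
slate_mov = "staging/shot010_slate.mov"
concat_txt = "staging/shot010_concat.txt"
final_mov = "staging/shot010_h264_slate.mov"

# 1) render the slate PNG as a one-frame movie (0.04 s at 25 fps = 1 frame)
subprocess.call(
    "ffmpeg -y -loop 1 -i {0} -r 25 -t 0.04 -codec:v libx264 {1}".format(
        slate_png, slate_mov),
    shell=True)

# 2) list both clips in a concat file and join them without re-encoding
with open(concat_txt, "w") as f:
    f.write("file {0}\nfile {1}\n".format(slate_mov, review_mov))
subprocess.call(
    "ffmpeg -y -f concat -safe 0 -i {0} -c copy {1}".format(
        concat_txt, final_mov),
    shell=True)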
def add_video_filter_args(self, args, inserting_arg):
"""
Fix video filter arguments into one long string.
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all existing video filter settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip the -vf / -filter:v flags from them
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
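In effect the helper guarantees a single -vf flag in the final command: any filter chain already present in the arguments is stripped of its -vf / -filter:v prefix, the new scaling chain is put first, and everything is joined back into one argument. A quick illustration (the LUT filter is a made-up preset entry):

output_args = ["-codec:v libx264", "-vf lut3d=grade.cube", "-crf 18"]
scaling_arg = "scale=1440x1080:flags=lanczos,pad=1920:1080:240:0:black,setsar=1"

# after add_video_filter_args(output_args, scaling_arg):
#   output_args == ["-codec:v libx264", "-crf 18"]
#   returned string == "-vf " + scaling_arg + ",lut3d=grade.cube"
# which the caller then inserts at position 0 of output_args.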

View file

@@ -474,6 +474,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
drive, _path = os.path.splitdrive(dst)
unc = Path(drive).resolve()
dst = str(unc / _path)
src = str(src)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
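For context, this block expands a mapped drive letter in the destination into its UNC form before copying, and the added src = str(src) makes sure a pathlib.Path source is handed to the copy call as a plain string. A rough sketch of the drive expansion, which only behaves as described on Windows with a mapped drive (server and share are made up):

import os
from pathlib import Path

dst = r"P:\projects\show\sh010\render\beauty.exr"   # hypothetical mapped-drive path
drive, _path = os.path.splitdrive(dst)              # ("P:", r"\projects\show\...")
unc = Path(drive).resolve()                         # e.g. WindowsPath("//server/share")
dst = str(unc / _path)                              # r"\\server\share\projects\show\..."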

View file

@@ -0,0 +1,39 @@
import pyblish.api
import nuke
class CollectSlate(pyblish.api.InstancePlugin):
"""Check if SLATE node is in scene and connected to rendering tree"""
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["write"]
def process(self, instance):
node = instance[0]
slate = next((n for n in nuke.allNodes()
if "slate" in n.name().lower()
if not n["disable"].getValue()),
None)
if slate:
# check if slate node is connected to write node tree
slate_check = 0
slate_node = None
while slate_check == 0:
try:
node = node.dependencies()[0]
if slate.name() in node.name():
slate_node = node
slate_check = 1
except IndexError:
break
if slate_node:
instance.data["slateNodeName"] = slate_node.name()
instance.data["families"].append("slate")
self.log.info(
"Slate node is in node graph: `{}`".format(slate.name()))
self.log.debug(
"__ instance: `{}`".format(instance))

View file

@@ -100,6 +100,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
group_node = [x for x in instance if x.Class() == "Group"][0]
deadlineChunkSize = 1
@@ -129,5 +130,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"subsetGroup": "renders"
})
self.log.debug("instance.data: {}".format(instance.data))

View file

@@ -0,0 +1,141 @@
import os
import nuke
from avalon.nuke import lib as anlib
import pyblish.api
import pype.api
class ExtractSlateFrame(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Slate Frame"
families = ["slate"]
hosts = ["nuke"]
def process(self, instance):
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
self.render_slate(instance)
def render_slate(self, instance):
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" in instance.data:
staging_dir = instance.data[
"representations"][0]["stagingDir"].replace("\\", "/")
instance.data["stagingDir"] = staging_dir
else:
instance.data["representations"] = []
# get output path
render_path = instance.data['path']
staging_dir = os.path.normpath(os.path.dirname(render_path))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
temporary_nodes = []
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes) - 1
last_frame = first_frame
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None) - 1
last_frame = first_frame
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
previous_node = node
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
file = fhead + "slate.png"
name = "slate"
path = os.path.join(staging_dir, file).replace("\\", "/")
instance.data["slateFrame"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("png")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
repre = {
'name': name,
'ext': "png",
'files': file,
"stagingDir": staging_dir,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render"
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug(
"representations: {}".format(instance.data["representations"]))
self.log.debug(
"slate frame path: {}".format(instance.data["slateFrame"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
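The slate frame is numbered one frame before the first plate frame and named from the head of the frame collection. The format/indexes calls above match the clique library's API, so a sketch with clique on a made-up sequence:

import clique

collections, _ = clique.assemble([
    "shot010_comp_v001.1001.exr",
    "shot010_comp_v001.1002.exr",
    "shot010_comp_v001.1003.exr",
])
collection = collections[0]

fhead = collection.format("{head}")         # "shot010_comp_v001."
first_frame = min(collection.indexes) - 1   # 1000 -> slate sits just before the plate
slate_file = fhead + "slate.png"            # "shot010_comp_v001.slate.png"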
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn
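get_view_process_node() grabs the Viewer's active input-process node by copying it through the clipboard, so the slate render bakes the same viewing transform as the review. A stripped-down sketch of that copy/paste duplication trick (Nuke only):

import nuke

def duplicate_node(node):
    # make sure only the node we want ends up in the copy buffer
    for n in nuke.selectedNodes():
        n.setSelected(False)
    node.setSelected(True)
    nuke.nodeCopy("%clipboard%")     # copy the selection to the system clipboard
    for n in nuke.selectedNodes():
        n.setSelected(False)
    nuke.nodePaste("%clipboard%")    # paste creates the duplicate and selects it
    return nuke.selectedNode()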

View file

@@ -8,24 +8,31 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
"""Ensure knobs are consistent.
Knobs to validate and their values come from the
"nuke/knobs.json" preset, which needs this structure:
{
"family": {
"knob_name": knob_value
}
}
Example for presets in config:
"presets/plugins/nuke/publish.json" preset, which needs this structure:
"ValidateNukeWriteKnobs": {
"enabled": true,
"knobs": {
"family": {
"knob_name": knob_value
}
}
}
"""
order = pyblish.api.ValidatorOrder
label = "Knobs"
label = "Validate Write Knobs"
hosts = ["nuke"]
actions = [pype.api.RepairContextAction]
optional = True
def process(self, context):
# Check for preset existence.
if not context.data["presets"]["nuke"].get("knobs"):
if not getattr(self, "knobs"):
return
self.log.debug("__ self.knobs: {}".format(self.knobs))
invalid = self.get_invalid(context, compute=True)
if invalid:
@@ -43,7 +50,6 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
@classmethod
def get_invalid_knobs(cls, context):
presets = context.data["presets"]["nuke"]["knobs"]
invalid_knobs = []
for instance in context:
# Filter publishable instances.
@@ -53,15 +59,15 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
# Filter families.
families = [instance.data["family"]]
families += instance.data.get("families", [])
families = list(set(families) & set(presets.keys()))
families = list(set(families) & set(cls.knobs.keys()))
if not families:
continue
# Get all knobs to validate.
knobs = {}
for family in families:
for preset in presets[family]:
knobs.update({preset: presets[family][preset]})
for preset in cls.knobs[family]:
knobs.update({preset: cls.knobs[family][preset]})
# Get invalid knobs.
nodes = []
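The rest of get_invalid_knobs is cut off here; it compares the gathered knob values against the preset. As an assumption about what that comparison amounts to, rather than a copy of the omitted code, a minimal per-node check could look like this:

import nuke

def invalid_knobs_for(node, knobs):
    """Return {knob_name: (current, expected)} for knobs whose value differs."""
    wrong = {}
    for knob_name, expected in knobs.items():
        if knob_name not in node.knobs():
            continue
        current = node[knob_name].value()
        if current != expected:
            wrong[knob_name] = (current, expected)
    return wrong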