Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into feature/unify_subprocess_calls

# Conflicts:
#	pype/plugins/global/publish/extract_burnin.py

Commit 7607eaf86b: 40 changed files with 742 additions and 912 deletions
@@ -1,28 +1,15 @@
import os
import sys
import logging

import nuke

from avalon import api as avalon
from avalon.tools import workfiles
from pyblish import api as pyblish

from .. import api

from pype.nuke import menu
import logging

from .lib import (
    create_write_node
)

import nuke

from pypeapp import Logger

# # removing logger handler created in avalon_core
# for name, handler in [(handler.get_name(), handler)
#                       for handler in Logger.logging.root.handlers[:]]:
#     if "pype" not in str(name).lower():
#         Logger.logging.root.removeHandler(handler)
from . import lib


self = sys.modules[__name__]

@@ -138,6 +125,9 @@ def install():
    if launch_workfiles:
        nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")

+    # Set context settings.
+    nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")


def launch_workfiles_app():
    if not self.workfiles_launched:
pype/nuke/lib.py (196 changed lines)

@@ -18,6 +18,7 @@ log = Logger().get_logger(__name__, "nuke")
self = sys.modules[__name__]
+self._project = None


def onScriptLoad():
    if nuke.env['LINUX']:
        nuke.tcl('load ffmpegReader')
@@ -43,7 +44,7 @@ def checkInventoryVersions():
        container = avalon.nuke.parse_container(each)

        if container:
-            node = container["_tool"]
+            node = container["_node"]
            avalon_knob_data = get_avalon_knob_data(node)

            # get representation from io
@@ -102,6 +103,9 @@ def writes_version_sync():

            node_new_file = node_file.replace(node_version, new_version)
            each['file'].setValue(node_new_file)
+            if not os.path.isdir(os.path.dirname(node_new_file)):
+                log.info("path does not exist")
+                os.makedirs(os.path.dirname(node_new_file), 0o766)
        except Exception as e:
            log.debug(
                "Write node: `{}` has no version in path: {}".format(each.name(), e))
@@ -172,7 +176,32 @@ def script_name():
    return nuke.root().knob('name').value()


-def create_write_node(name, data):
+def create_write_node(name, data, prenodes=None):
+    '''Creating write node which is a group node
+
+    Arguments:
+        name (str): name of node
+        data (dict): data to be imprinted
+        prenodes (list, optional): list of lists, definitions for nodes
+                                   to be created before write
+
+    Example:
+        prenodes = [(
+            "NameNode",  # string
+            "NodeClass",  # string
+            (  # OrderedDict: knob and value pairs
+                ("knobName", "knobValue"),
+                ("knobName", "knobValue")
+            ),
+            (  # list of inputs
+                "firstPrevNodeName",
+                "secondPrevNodeName"
+            )
+        )]
+
+    '''

    nuke_dataflow_writes = get_node_dataflow_preset(**data)
    nuke_colorspace_writes = get_node_colorspace_preset(**data)
    application = lib.get_application(os.environ["AVALON_APP_NAME"])
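For reference, a minimal sketch of how a caller might assemble the `prenodes` argument described in the docstring above; the node name, class, knob values, and input name here are all hypothetical:

    # Hypothetical prenode chain: a Reformat applied before the internal Write.
    prenodes = [
        (
            "resize_node",            # node name (hypothetical)
            "Reformat",               # Nuke node class
            (                         # knob/value pairs applied to the node
                ("type", "to format"),
                ("format", "HD_1080"),
            ),
            None,                     # no explicit inputs: connect to previous node
        ),
    ]

    # create_write_node("renderMain", write_data, prenodes=prenodes) would then
    # build the group with this node placed before the internal Write node.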
@@ -191,10 +220,11 @@ def create_write_node(name, data):

    # build file path to workfiles
    fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/")
-    fpath = '{work}/renders/v{version}/{subset}.{frame}.{ext}'.format(
+    fpath = data["fpath_template"].format(
        work=fpath, version=data["version"], subset=data["subset"],
        frame=data["frame"],
-        ext=data["nuke_dataflow_writes"]["file_type"])
+        ext=data["nuke_dataflow_writes"]["file_type"]
+    )

    # create directory
    if not os.path.isdir(os.path.dirname(fpath)):
@@ -219,14 +249,89 @@ def create_write_node(name, data):
    log.debug(_data)

    _data["frame_range"] = data.get("frame_range", None)
    log.info("__ _data3: {}".format(_data))
-    instance = avalon.nuke.lib.add_write_node(
-        name,
-        **_data
-    )
-    instance = avalon.nuke.lib.imprint(instance, data["avalon"])
-    add_rendering_knobs(instance)
-    return instance
+    # TODO: change this to the new way
+    GN = nuke.createNode("Group", "name {}".format(name))
+
+    prev_node = None
+    with GN:
+        # creating pre-write nodes `prenodes`
+        if prenodes:
+            for name, klass, properties, set_input_to in prenodes:
+                # create node
+                now_node = nuke.createNode(klass, "name {}".format(name))
+
+                # add data to knob
+                for k, v in properties:
+                    if k and v:
+                        now_node[k].setValue(str(v))
+
+                # connect to previous node
+                if set_input_to:
+                    if isinstance(set_input_to, (tuple, list)):
+                        for i, node_name in enumerate(set_input_to):
+                            input_node = nuke.toNode(node_name)
+                            now_node.setInput(i, input_node)
+                    elif isinstance(set_input_to, str):
+                        input_node = nuke.toNode(set_input_to)
+                        now_node.setInput(0, input_node)
+                else:
+                    now_node.setInput(0, prev_node)
+
+                # switch actual node to previous
+                prev_node = now_node
+        else:
+            prev_node = nuke.createNode("Input", "name rgba")
+
+        # creating write node
+        now_node = avalon.nuke.lib.add_write_node("inside_{}".format(name),
+                                                  **_data
+                                                  )
+        write_node = now_node
+        # connect to previous node
+        now_node.setInput(0, prev_node)
+
+        # switch actual node to previous
+        prev_node = now_node
+
+        now_node = nuke.createNode("Output", "name write")
+
+        # connect to previous node
+        now_node.setInput(0, prev_node)
+
+    # imprinting group node
+    GN = avalon.nuke.imprint(GN, data["avalon"])
+
+    divider = nuke.Text_Knob('')
+    GN.addKnob(divider)
+
+    add_rendering_knobs(GN)
+
+    divider = nuke.Text_Knob('')
+    GN.addKnob(divider)
+
+    # set tile color
+    tile_color = _data.get("tile_color", "0xff0000ff")
+    GN["tile_color"].setValue(tile_color)
+
+    # add render button
+    lnk = nuke.Link_Knob("Render")
+    lnk.makeLink(write_node.name(), "Render")
+    lnk.setName("Render")
+    GN.addKnob(lnk)
+
+    # linking knobs to group property panel
+    linking_knobs = ["first", "last", "use_limit"]
+    for k in linking_knobs:
+        lnk = nuke.Link_Knob(k)
+        lnk.makeLink(write_node.name(), k)
+        lnk.setName(k.replace('_', ' ').capitalize())
+        lnk.clearFlag(nuke.STARTLINE)
+        GN.addKnob(lnk)
+
+    return GN


def add_rendering_knobs(node):
@@ -389,29 +494,24 @@ def reset_frame_range_handles():
    # setting active viewers
    nuke.frame(int(asset["data"]["fstart"]))

    try:
        vv = nuke.activeViewer().node()
    except AttributeError:
        log.error("No active viewer. Select any node and hit num `1`")
        return

    range = '{0}-{1}'.format(
        int(asset["data"]["fstart"]),
        int(asset["data"]["fend"]))

    vv['frame_range'].setValue(range)
    vv['frame_range_lock'].setValue(True)
    for node in nuke.allNodes(filter="Viewer"):
        node['frame_range'].setValue(range)
        node['frame_range_lock'].setValue(True)

-        log.info("_frameRange: {}".format(range))
-        log.info("frameRange: {}".format(vv['frame_range'].value()))
+        log.info("_frameRange: {}".format(range))
+        log.info("frameRange: {}".format(node['frame_range'].value()))

-        vv['frame_range'].setValue(range)
-        vv['frame_range_lock'].setValue(True)
+        node['frame_range'].setValue(range)
+        node['frame_range_lock'].setValue(True)

    # adding handle_start/end to root avalon knob
    if not avalon.nuke.set_avalon_knob_data(root, {
-        "handle_start": handle_start,
-        "handle_end": handle_end
+        "handle_start": int(handle_start),
+        "handle_end": int(handle_end)
    }):
        log.warning("Cannot set Avalon knob to Root node!")
@@ -432,34 +532,26 @@ def reset_resolution():
    asset = api.Session["AVALON_ASSET"]
    asset = io.find_one({"name": asset, "type": "asset"})

-    try:
-        width = asset.get('data', {}).get('resolution_width', 1920)
-        height = asset.get('data', {}).get('resolution_height', 1080)
-        pixel_aspect = asset.get('data', {}).get('pixel_aspect', 1)
-        bbox = asset.get('data', {}).get('crop', "0.0.1920.1080")
+    width = asset.get('data', {}).get('resolution_width')
+    height = asset.get('data', {}).get('resolution_height')
+    pixel_aspect = asset.get('data', {}).get('pixel_aspect')

-        if bbox not in "0.0.1920.1080":
-            try:
-                x, y, r, t = bbox.split(".")
-            except Exception as e:
-                x = 0
-                y = 0
-                r = width
-                t = height
-                bbox = None
-                log.error("{}: {} \nFormat: Crop needs to be set with dots, example: "
-                          "0.0.1920.1080. \nSetting to default".format(__name__, e))
-        else:
-            bbox = None
-
-    except KeyError:
-        log.warning(
-            "No resolution information found for \"{0}\".".format(
-                project["name"]
-            )
-        )
+    log.info("pixel_aspect: {}".format(pixel_aspect))
+    if any(not x for x in [width, height, pixel_aspect]):
+        log.error("Missing shot attributes in DB. \nContact your supervisor! "
+                  "\n\nWidth: `{0}` \nHeight: `{1}` \nPixel Aspect: `{2}`".format(
+                      width, height, pixel_aspect))
+        return
+
+    bbox = asset.get('data', {}).get('crop')
+
+    if bbox:
+        try:
+            x, y, r, t = bbox.split(".")
+        except Exception as e:
+            bbox = None
+            log.error("{}: {} \nFormat: Crop needs to be set with dots, example: "
+                      "0.0.1920.1080. \nSetting to default".format(__name__, e))

    used_formats = list()
    for f in nuke.formats():
        if project["name"] in str(f.name()):
@@ -616,7 +708,7 @@ def get_hierarchical_attr(entity, attr, default=None):
#     dict
#     """
#
-#     node = container["_tool"]
+#     node = container["_node"]
#     tile_color = node['tile_color'].value()
#     if tile_color is None:
#         return {}
pype/plugins/ftrack/publish/integrate_remove_components.py (new file, 27 lines)

@@ -0,0 +1,27 @@
+import pyblish.api
+import os
+
+
+class IntegrateCleanComponentData(pyblish.api.InstancePlugin):
+    """
+    Cleaning up thumbnail and mov files after they have been integrated
+    """
+
+    order = pyblish.api.IntegratorOrder + 0.5
+    label = 'Clean component data'
+    families = ["ftrack"]
+    optional = True
+    active = True
+
+    def process(self, instance):
+
+        for comp in instance.data['representations']:
+            self.log.debug('component {}'.format(comp))
+
+            if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])):
+                os.remove(comp['published_path'])
+                self.log.info('Thumbnail image was erased')
+
+            elif comp.get('preview') or ("preview" in comp.get('tags', [])):
+                os.remove(comp['published_path'])
+                self.log.info('Preview mov file was erased')
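For illustration, representation entries that this plugin would act on might look like the following; the keys mirror those queried in `process`, while the paths and values are assumed:

    # Hypothetical representation entries as produced earlier in the publish chain.
    representations = [
        {
            "name": "thumbnail",
            "published_path": "/projects/show/publish/thumb.jpg",  # assumed path
            "thumbnail": True,
            "tags": ["thumbnail"],
        },
        {
            "name": "preview",
            "published_path": "/projects/show/publish/review.mov",  # assumed path
            "preview": True,
            "tags": ["preview"],
        },
    ]
    # The plugin would call os.remove() on both published_path values above.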
@@ -3,11 +3,33 @@ import shutil
import pyblish.api


+def clean_renders(instance):
+    transfers = instance.data.get("transfers", list())
+
+    current_families = instance.data.get("families", list())
+    instance_family = instance.data.get("family", None)
+    dirnames = []
+
+    for src, dest in transfers:
+        if os.path.normpath(src) != os.path.normpath(dest):
+            if instance_family == 'render' or 'render' in current_families:
+                os.remove(src)
+                dirnames.append(os.path.dirname(src))
+
+    # make unique set
+    cleanup_dirs = set(dirnames)
+    for dir in cleanup_dirs:
+        try:
+            os.rmdir(dir)
+        except OSError:
+            # directory is not empty, skipping
+            continue
+
+
class CleanUp(pyblish.api.InstancePlugin):
    """Cleans up the staging directory after a successful publish.

    The removal will only happen for staging directories which are inside the
    temporary folder, otherwise the folder is ignored.
+    This will also clean published renders and delete their parent directories.

    """

@@ -36,3 +58,5 @@ class CleanUp(pyblish.api.InstancePlugin):

        self.log.info("Removing temporary folder ...")
        shutil.rmtree(staging_dir)
+        self.log.info("Cleaning renders ...")
+        clean_renders(instance)
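The empty-directory sweep in `clean_renders` relies on `os.rmdir` raising `OSError` for non-empty directories; a standalone sketch of the same pattern, under the assumption that non-empty directories should simply be kept:

    import os

    def remove_if_empty(paths):
        """Try to remove each directory; os.rmdir raises OSError when the
        directory still has content, which we treat as 'keep it'."""
        for path in set(paths):  # de-duplicate like the plugin does
            try:
                os.rmdir(path)
                print("removed empty dir:", path)
            except OSError:
                continue  # not empty (or already gone): leave it alone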
@@ -160,10 +160,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):

        # Get family from the data
        families = data.get("families", ["render"])
        assert isinstance(families, (list, tuple)), "Must be iterable"
        assert families, "Must have at least a single family"
-        families.append("ftrack")
-        families.append("review")
+        if "render" not in families:
+            families.append("render")
+        if "ftrack" not in families:
+            families.append("ftrack")
+        if "review" not in families:
+            families.append("review")

        for collection in collections:
            instance = context.create_instance(str(collection))
            self.log.info("Collection: %s" % list(collection))
@@ -17,6 +17,7 @@ class ExtractBurnin(pype.api.Extractor):
    label = "Quicktime with burnins"
    order = pyblish.api.ExtractorOrder + 0.03
    families = ["review", "burnin"]
    hosts = ["nuke", "maya", "shell"]
+    optional = True

    def process(self, instance):

@@ -61,10 +62,23 @@ class ExtractBurnin(pype.api.Extractor):
        self.log.debug("__ burnin_data2: {}".format(burnin_data))

        json_data = json.dumps(burnin_data)
-        scriptpath = os.path.normpath(os.path.join(os.environ['PYPE_MODULE_ROOT'],
-                                                   "pype",
-                                                   "scripts",
-                                                   "otio_burnin.py"))
+
+        # Get script path.
+        module_path = os.environ['PYPE_MODULE_ROOT']
+
+        # There can be multiple paths in PYPE_MODULE_ROOT, in which case
+        # we just take the first one.
+        if os.pathsep in module_path:
+            module_path = module_path.split(os.pathsep)[0]
+
+        scriptpath = os.path.normpath(
+            os.path.join(
+                module_path,
+                "pype",
+                "scripts",
+                "otio_burnin.py"
+            )
+        )

        self.log.debug("__ scriptpath: {}".format(scriptpath))
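The `os.pathsep` handling above is plain PATH-style splitting; a small self-contained sketch with an assumed value:

    import os

    # Assumed example value; on Linux os.pathsep is ":" (";" on Windows).
    os.environ["PYPE_MODULE_ROOT"] = "/opt/pype" + os.pathsep + "/opt/pype-dev"

    module_path = os.environ["PYPE_MODULE_ROOT"]
    if os.pathsep in module_path:
        module_path = module_path.split(os.pathsep)[0]

    print(module_path)  # -> /opt/pype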
@@ -20,6 +20,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
    label = "Extract Review"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["review"]
    hosts = ["nuke", "maya", "shell"]

    def process(self, instance):
        # adding plugin attributes from presets

@@ -109,12 +110,42 @@ class ExtractReview(pyblish.api.InstancePlugin):

        # necessary input data
        # adds start arg only if image sequence
        if "mov" not in repre_new['ext']:
            if isinstance(repre["files"], list):
                input_args.append("-start_number {0} -framerate {1}".format(
                    start_frame, fps))

        input_args.append("-i {}".format(full_input_path))

+        for audio in instance.data.get("audio", []):
+            offset_frames = (
+                instance.data.get("startFrameReview") -
+                audio["offset"]
+            )
+            offset_seconds = offset_frames / fps
+
+            if offset_seconds > 0:
+                input_args.append("-ss")
+            else:
+                input_args.append("-itsoffset")
+
+            input_args.append(str(abs(offset_seconds)))
+
+            input_args.extend(
+                ["-i", audio["filename"]]
+            )
+
+            # Need to merge audio if there is more
+            # than 1 input.
+            if len(instance.data["audio"]) > 1:
+                input_args.extend(
+                    [
+                        "-filter_complex",
+                        "amerge",
+                        "-ac",
+                        "2"
+                    ]
+                )

        output_args = []
        # preset's output data
        output_args.extend(profile.get('output', []))

@@ -126,6 +157,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
        output_args.append(
            "-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))

+        # In case audio is longer than video.
+        output_args.append("-shortest")
+
        # output filename
        output_args.append(full_output_path)
        mov_args = [
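To make the `-ss`/`-itsoffset` branch concrete, a worked sketch of the offset arithmetic with assumed frame numbers and an assumed audio file name:

    fps = 25.0
    start_frame_review = 1001      # assumed review start frame
    audio_offset = 995             # assumed audio node offset in frames

    offset_frames = start_frame_review - audio_offset   # 6 frames
    offset_seconds = offset_frames / fps                # 0.24 s

    # Positive offset: skip into the audio with -ss; negative: delay it
    # with -itsoffset, always passing the absolute value.
    flag = "-ss" if offset_seconds > 0 else "-itsoffset"
    args = [flag, str(abs(offset_seconds)), "-i", "dialogue.wav"]  # hypothetical file
    print(args)  # ['-ss', '0.24', '-i', 'dialogue.wav']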
@@ -403,20 +403,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        self.log.info("Registered {} items".format(len(representations)))

    def integrate(self, instance):
-        """Move the files
+        """ Move the files.

-            Through `instance.data["transfers"]`
+        Through `instance.data["transfers"]`

-            Args:
-                instance: the instance to integrate
+        Args:
+            instance: the instance to integrate
        """

        transfers = instance.data.get("transfers", list())

        for src, dest in transfers:
            if os.path.normpath(src) != os.path.normpath(dest):
                self.copy_file(src, dest)

        # Produce hardlinked copies
        # Note: hardlink can only be produced between two files on the same
        # server/disk and editing one of the two will edit both files at once.
@@ -90,6 +90,9 @@ class ImagePlaneLoader(api.Loader):
        )
        image_plane_shape.frameExtension.set(start_frame)

+        # Ensure OpenEXRLoader plugin is loaded.
+        pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
+
        new_nodes.extend(
            [image_plane_transform.name(), image_plane_shape.name()]
        )
@@ -1,4 +1,4 @@
-from maya import cmds
+from maya import cmds, mel
import pymel.core as pm

import pyblish.api

@@ -76,3 +76,39 @@ class CollectReview(pyblish.api.InstancePlugin):
            instance.data["families"] = ['ftrack']

        cmds.setAttr(str(instance) + '.active', 1)

+        # Collect audio
+        playback_slider = mel.eval('$tmpVar=$gPlayBackSlider')
+        audio_name = cmds.timeControl(playback_slider, q=True, s=True)
+        display_sounds = cmds.timeControl(
+            playback_slider, q=True, displaySound=True
+        )
+
+        audio_nodes = []
+
+        if audio_name:
+            audio_nodes.append(pm.PyNode(audio_name))
+
+        if not audio_name and display_sounds:
+            start_frame = int(pm.playbackOptions(q=True, min=True))
+            end_frame = float(pm.playbackOptions(q=True, max=True))
+            frame_range = range(int(start_frame), int(end_frame))
+
+            for node in pm.ls(type="audio"):
+                # Check whether the frame range and the audio range intersect,
+                # to decide whether to include this audio node or not.
+                start_audio = node.offset.get()
+                end_audio = node.offset.get() + node.duration.get()
+                audio_range = range(int(start_audio), int(end_audio))
+
+                if bool(set(frame_range).intersection(audio_range)):
+                    audio_nodes.append(node)
+
+        instance.data["audio"] = []
+        for node in audio_nodes:
+            instance.data["audio"].append(
+                {
+                    "offset": node.offset.get(),
+                    "filename": node.filename.get()
+                }
+            )
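The audio collection above tests overlap by converting both spans to integer ranges; a minimal standalone sketch with assumed frame values:

    # Assumed playback range and audio clip placement (in frames).
    frame_range = range(1001, 1100)
    start_audio, end_audio = 1080.0, 1150.0
    audio_range = range(int(start_audio), int(end_audio))

    # Overlap exists if any frame is shared between the two ranges.
    overlaps = bool(set(frame_range).intersection(audio_range))
    print(overlaps)  # True: frames 1080-1099 are in both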
@@ -118,7 +118,7 @@ class ExtractQuicktime(pype.api.Extractor):
            'endFrame': end,
            'frameRate': fps,
            'preview': True,
-            'tags': ['review']
+            'tags': ['review', 'delete']
        }
        instance.data["representations"].append(representation)
@@ -51,7 +51,7 @@ def get_renderer_variables(renderlayer=None):
    # returns an index number.
    filename_base = os.path.basename(filename_0)
    extension = os.path.splitext(filename_base)[-1].strip(".")
-    filename_prefix = "<Scene>/<RenderLayer>/<RenderLayer>"
+    filename_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")

    return {"ext": extension,
            "filename_prefix": filename_prefix,

@@ -77,8 +77,19 @@ def preview_fname(folder, scene, layer, padding, ext):

    """

-    # Following hardcoded "<Scene>/<Scene>_<Layer>/<Layer>"
-    output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
+    fileprefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
+    output = fileprefix + ".{number}.{ext}"
+    # RenderPass is currently hardcoded to "beauty" because it's not important
+    # for the deadline submission, but we will need something to replace
+    # "<RenderPass>".
+    mapping = {
+        "<Scene>": "{scene}",
+        "<RenderLayer>": "{layer}",
+        "<RenderPass>": "beauty"
+    }
+    for key, value in mapping.items():
+        output = output.replace(key, value)
+    output = output.format(
        scene=scene,
        layer=layer,
        number="#" * padding,
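A short sketch of how the token mapping above rewrites an assumed image file prefix into a formattable template:

    # Assumed prefix as Maya might return it.
    fileprefix = "maya/<RenderLayer>/<RenderLayer>_<RenderPass>"
    output = fileprefix + ".{number}.{ext}"

    mapping = {
        "<Scene>": "{scene}",
        "<RenderLayer>": "{layer}",
        "<RenderPass>": "beauty",
    }
    for key, value in mapping.items():
        output = output.replace(key, value)

    print(output.format(scene="shot010", layer="char", number="####", ext="exr"))
    # -> maya/char/char_beauty.####.exr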
@@ -250,8 +250,15 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
        render publish job and submit job to farm.
        """
        # setup muster environment
-        self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL",
-                                              "https://localhost:9891")
+        self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
+
+        if self.MUSTER_REST_URL is None:
+            self.log.debug(
+                "\"MUSTER_REST_URL\" is not found. Skipping "
+                "\"{}\".".format(instance)
+            )
+            return

        self._load_credentials()
        self._authenticate()
        self._get_templates()
@@ -1,4 +1,7 @@
-import maya.cmds as cmds
+import os
+
+from maya import cmds, mel
import pymel.core as pm

import pyblish.api
import pype.api

@@ -9,9 +12,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    """Validates the global render settings

    * File Name Prefix must be as follows:
-        * vray: <Scene>/<Layer>/<Layer>
-        * arnold: <Scene>/<RenderLayer>/<RenderLayer>
-        * default: <Scene>/<RenderLayer>/<RenderLayer>
+        * vray: maya/<Layer>/<Layer>
+        * arnold: maya/<RenderLayer>/<RenderLayer>
+        * default: maya/<RenderLayer>/<RenderLayer>

    * Frame Padding must be:
        * default: 4

@@ -34,8 +37,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    actions = [pype.api.RepairAction]

    DEFAULT_PADDING = 4
-    RENDERER_PREFIX = {"vray": "<Scene>/<Layer>/<Layer>"}
-    DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
+    RENDERER_PREFIX = {"vray": "maya/<Layer>/<Layer>"}
+    DEFAULT_PREFIX = "maya/<RenderLayer>/<RenderLayer>_<RenderPass>"

    def process(self, instance):

@@ -66,8 +69,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
        cls.log.error("Animation needs to be enabled. Use the same "
                      "frame for start and end to render single frame")

-        fname_prefix = cls.RENDERER_PREFIX.get(renderer,
-                                               cls.DEFAULT_PREFIX)
+        fname_prefix = cls.get_prefix(renderer)

        if prefix != fname_prefix:
            invalid = True
            cls.log.error("Wrong file name prefix: %s (expected: %s)"

@@ -80,6 +83,21 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

        return invalid

+    @classmethod
+    def get_prefix(cls, renderer):
+        prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
+        # maya.cmds and pymel.core return only the default project
+        # directory, not the current one.
+        output_path = os.path.join(
+            mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"]
+        )
+        # Workfile paths can be configured to have the host name in the path.
+        # In this case we want to avoid duplicate folder names.
+        if "maya" in output_path.lower():
+            prefix = prefix.replace("maya/", "")
+
+        return prefix
+
    @classmethod
    def repair(cls, instance):

@@ -94,7 +112,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
        node = render_attrs["node"]
        prefix_attr = render_attrs["prefix"]

-        fname_prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
+        fname_prefix = cls.get_prefix(renderer)
        cmds.setAttr("{}.{}".format(node, prefix_attr),
                     fname_prefix, type="string")
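A hedged sketch of the duplicate-folder check inside `get_prefix`, using an assumed prefix and workspace images path:

    # Assumed values: prefix template and resolved images output directory.
    prefix = "maya/<RenderLayer>/<RenderLayer>_<RenderPass>"
    output_path = "/projects/show/work/shot010/maya/images"  # contains "maya"

    # If the configured output already nests a "maya" folder, strip the
    # leading "maya/" from the prefix to avoid ".../maya/maya/..." on disk.
    if "maya" in output_path.lower():
        prefix = prefix.replace("maya/", "")

    print(prefix)  # -> <RenderLayer>/<RenderLayer>_<RenderPass>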
@@ -1,252 +0,0 @@
- import os
- import contextlib
-
- from avalon import api
- import avalon.io as io
-
- from avalon.nuke import log
- import nuke
-
-
- @contextlib.contextmanager
- def preserve_inputs(node, knobs):
-     """Preserve the node's inputs after context"""
-
-     values = {}
-     for name in knobs:
-         try:
-             knob_value = node[name].value()
-             values[name] = knob_value
-         except ValueError:
-             log.warning("missing knob {} in node {}".format(
-                 name, node['name'].value()))
-
-     try:
-         yield
-     finally:
-         for name, value in values.items():
-             node[name].setValue(value)
-
-
- @contextlib.contextmanager
- def preserve_trim(node):
-     """Preserve the relative trim of the Loader tool.
-
-     This tries to preserve the loader's trim (trim in and trim out) after
-     the context by reapplying the "amount" it trims on the clip's length at
-     start and end.
-
-     """
-     # working script frame range
-     script_start = nuke.root()["start_frame"].value()
-
-     start_at_frame = None
-     offset_frame = None
-     if node['frame_mode'].value() == "start at":
-         start_at_frame = node['frame'].value()
-     if node['frame_mode'].value() == "offset":
-         offset_frame = node['frame'].value()
-
-     try:
-         yield
-     finally:
-         if start_at_frame:
-             node['frame_mode'].setValue("start at")
-             node['frame'].setValue(str(script_start))
-             log.info("start frame of reader was set to "
-                      "{}".format(script_start))
-
-         if offset_frame:
-             node['frame_mode'].setValue("offset")
-             node['frame'].setValue(str((script_start + offset_frame)))
-             log.info("start frame of reader was set to "
-                      "{}".format(script_start))
-
-
- def loader_shift(node, frame, relative=True):
-     """Shift global in time by i preserving duration
-
-     This moves the loader by i frames preserving global duration. When
-     relative is False it will shift the global in to the start frame.
-
-     Args:
-         loader (tool): The fusion loader tool.
-         frame (int): The amount of frames to move.
-         relative (bool): When True the shift is relative, else the shift will
-             change the global in to frame.
-
-     Returns:
-         int: The resulting relative frame change (how much it moved)
-
-     """
-     # working script frame range
-     script_start = nuke.root()["start_frame"].value()
-
-     if node['frame_mode'].value() == "start at":
-         start_at_frame = node['frame'].value()
-     if node['frame_mode'].value() == "offset":
-         offset_frame = node['frame'].value()
-
-     if relative:
-         shift = frame
-     else:
-         if start_at_frame:
-             shift = frame
-         if offset_frame:
-             shift = frame + offset_frame
-
-     # Shifting global in will try to automatically compensate for the change
-     # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
-     # input values to "just shift" the clip
-     with preserve_inputs(node, knobs=["file",
-                                       "first",
-                                       "last",
-                                       "originfirst",
-                                       "originlast",
-                                       "frame_mode",
-                                       "frame"]):
-
-         # GlobalIn cannot be set past GlobalOut or vice versa
-         # so we must apply them in the order of the shift.
-         if start_at_frame:
-             node['frame_mode'].setValue("start at")
-             node['frame'].setValue(str(script_start + shift))
-         if offset_frame:
-             node['frame_mode'].setValue("offset")
-             node['frame'].setValue(str(shift))
-
-     return int(shift)
-
-
- class LoadSequence(api.Loader):
-     """Load image sequence into Nuke"""
-
-     families = ["write"]
-     representations = ["*"]
-
-     label = "Load sequence"
-     order = -10
-     icon = "code-fork"
-     color = "orange"
-
-     def load(self, context, name, namespace, data):
-
-         from avalon.nuke import (
-             containerise,
-             ls_img_sequence,
-             viewer_update_and_undo_stop
-         )
-         log.info("here i am")
-         # Fallback to asset name when namespace is None
-         if namespace is None:
-             namespace = context['asset']['name']
-
-         # Use the first file for now
-         # TODO: fix path fname
-         file = ls_img_sequence(os.path.dirname(self.fname), one=True)
-
-         # Create the Loader with the filename path set
-         with viewer_update_and_undo_stop():
-             # TODO: it might be universal read to img/geo/camera
-             r = nuke.createNode(
-                 "Read",
-                 "name {}".format(self.name))  # TODO: does self.name exist?
-             r["file"].setValue(file['path'])
-             if len(file['frames']) == 1:
-                 first = file['frames'][0][0]
-                 last = file['frames'][0][1]
-                 r["originfirst"].setValue(first)
-                 r["first"].setValue(first)
-                 r["originlast"].setValue(last)
-                 r["last"].setValue(last)
-             else:
-                 first = file['frames'][0][0]
-                 last = file['frames'][:-1][1]
-                 r["originfirst"].setValue(first)
-                 r["first"].setValue(first)
-                 r["originlast"].setValue(last)
-                 r["last"].setValue(last)
-                 log.warning("Missing frames in image sequence")
-
-         # Set global in point to start frame (if in version.data)
-         start = context["version"]["data"].get("startFrame", None)
-         if start is not None:
-             loader_shift(r, start, relative=False)
-
-         containerise(r,
-                      name=name,
-                      namespace=namespace,
-                      context=context,
-                      loader=self.__class__.__name__)
-
-     def switch(self, container, representation):
-         self.update(container, representation)
-
-     def update(self, container, representation):
-         """Update the Loader's path
-
-         Fusion automatically tries to reset some variables when changing
-         the loader's path to a new file. These automatic changes are to its
-         inputs:
-
-         """
-
-         from avalon.nuke import (
-             viewer_update_and_undo_stop,
-             ls_img_sequence,
-             update_container
-         )
-         log.info("this i can see")
-         node = container["_tool"]
-         # TODO: prepare also for other readers img/geo/camera
-         assert node.Class() == "Reader", "Must be Reader"
-
-         root = api.get_representation_path(representation)
-         file = ls_img_sequence(os.path.dirname(root), one=True)
-
-         # Get start frame from version data
-         version = io.find_one({"type": "version",
-                                "_id": representation["parent"]})
-         start = version["data"].get("startFrame")
-         if start is None:
-             log.warning("Missing start frame for updated version, "
-                         "assuming starts at frame 0 for: "
-                         "{} ({})".format(node['name'].value(), representation))
-             start = 0
-
-         with viewer_update_and_undo_stop():
-
-             # Update the loader's path whilst preserving some values
-             with preserve_trim(node):
-                 with preserve_inputs(node,
-                                      knobs=["file",
-                                             "first",
-                                             "last",
-                                             "originfirst",
-                                             "originlast",
-                                             "frame_mode",
-                                             "frame"]):
-                     node["file"] = file["path"]
-
-             # Set the global in to the start frame of the sequence
-             global_in_changed = loader_shift(node, start, relative=False)
-             if global_in_changed:
-                 # Log this change to the user
-                 log.debug("Changed '{}' global in:"
-                           " {:d}".format(node['name'].value(), start))
-
-             # Update the imprinted representation
-             update_container(
-                 node,
-                 {"representation": str(representation["_id"])}
-             )
-
-     def remove(self, container):
-
-         from avalon.nuke import viewer_update_and_undo_stop
-
-         node = container["_tool"]
-         assert node.Class() == "Reader", "Must be Reader"
-
-         with viewer_update_and_undo_stop():
-             nuke.delete(node)

@@ -1,17 +0,0 @@
- # type: render
- # if there is no render-type node in the script, the first one with [master] in its name defines the main script renderer
- # colorspace setting from templates
- # dataflow setting from templates
-
- # type: mask_render
- # created with shuffle gizmo for RGB separation into davinci matte
- # colorspace setting from templates
- # dataflow setting from templates
-
- # type: prerender
- # backdrop with write and read
- # colorspace setting from templates
- # dataflow setting from templates
-
- # type: geo
- # dataflow setting from templates
@@ -1,11 +1,9 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
-from pype.nuke import (
-    create_write_node
-)
+from pype.nuke.lib import create_write_node
from pype import api as pype
# from pypeapp import Logger
from pypeapp import config

import nuke

@@ -33,6 +31,11 @@ class CreateWriteRender(avalon.nuke.Creator):

    def __init__(self, *args, **kwargs):
        super(CreateWriteRender, self).__init__(*args, **kwargs)
+        self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
+            self.__class__.__name__, {}
+        )
+
+        self.name = self.data["subset"]

        data = OrderedDict()

@@ -44,7 +47,6 @@ class CreateWriteRender(avalon.nuke.Creator):
        self.data = data

    def process(self):
-        self.name = self.data["subset"]

        family = self.family
        node = 'write'

@@ -58,6 +60,16 @@ class CreateWriteRender(avalon.nuke.Creator):
            "avalon": self.data
        }

+        if self.presets.get('fpath_template'):
+            self.log.info("Adding template path from preset")
+            write_data.update(
+                {"fpath_template": self.presets["fpath_template"]}
+            )
+        else:
+            self.log.info("Adding template path from plugin")
+            write_data.update({
+                "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
+
        create_write_node(self.data["subset"], write_data)

        return
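For reference, a sketch of how the plugin's default `fpath_template` would resolve; the work path and subset values are hypothetical, and the actual formatting happens later in `create_write_node`:

    fpath_template = "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"

    # Hypothetical values as assembled in pype/nuke/lib.py's create_write_node().
    fpath = fpath_template.format(
        work="/projects/show/shot010/work",
        subset="renderMain",
        frame="%04d",
        ext="exr",
    )
    print(fpath)
    # -> /projects/show/shot010/work/renders/nuke/renderMain/renderMain.%04d.exr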
@@ -77,6 +89,9 @@ class CreateWritePrerender(avalon.nuke.Creator):

    def __init__(self, *args, **kwargs):
        super(CreateWritePrerender, self).__init__(*args, **kwargs)
+        self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
+            self.__class__.__name__, {}
+        )

        data = OrderedDict()

@@ -100,56 +115,16 @@ class CreateWritePrerender(avalon.nuke.Creator):
            "avalon": self.data
        }

+        if self.presets.get('fpath_template'):
+            self.log.info("Adding template path from preset")
+            write_data.update(
+                {"fpath_template": self.presets["fpath_template"]}
+            )
+        else:
+            self.log.info("Adding template path from plugin")
+            write_data.update({
+                "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
+
        create_write_node(self.data["subset"], write_data)

        return
-
-
- """
- class CrateWriteStill(avalon.nuke.Creator):
-     # change this to template preset
-     preset = "still"
-
-     name = "WriteStill"
-     label = "Create Write Still"
-     hosts = ["nuke"]
-     family = "{}_write".format(preset)
-     families = preset
-     icon = "image"
-
-     def __init__(self, *args, **kwargs):
-         super(CrateWriteStill, self).__init__(*args, **kwargs)
-
-         data = OrderedDict()
-
-         data["family"] = self.family.split("_")[-1]
-         data["families"] = self.families
-
-         {data.update({k: v}) for k, v in self.data.items()
-          if k not in data.keys()}
-         self.data = data
-
-     def process(self):
-         self.name = self.data["subset"]
-
-         node_name = self.data["subset"].replace(
-             "_", "_f{}_".format(nuke.frame()))
-         instance = nuke.toNode(self.data["subset"])
-         self.data["subset"] = node_name
-
-         family = self.family
-         node = 'write'
-
-         if not instance:
-             write_data = {
-                 "frame_range": [nuke.frame(), nuke.frame()],
-                 "class": node,
-                 "preset": self.preset,
-                 "avalon": self.data
-             }
-
-             nuke.createNode("FrameHold", "first_frame {}".format(nuke.frame()))
-             create_write_node(node_name, write_data)
-
-         return
- """
@@ -11,7 +11,7 @@ class SelectContainers(api.InventoryAction):

        import avalon.nuke

-        nodes = [i["_tool"] for i in containers]
+        nodes = [i["_node"] for i in containers]

        with avalon.nuke.viewer_update_and_undo_stop():
            # clear previous_selection

@@ -20,7 +20,7 @@
#
#     # Get tool color
#     first = containers[0]
-#     tool = first["_tool"]
+#     tool = first["_node"]
#     color = tool.TileColor
#
#     if color is not None:

@@ -40,7 +40,7 @@
#     rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
#
#     # Update tool
-#     tool = container["_tool"]
+#     tool = container["_node"]
#     tool.TileColor = rgb_f_table
#
#     result.append(container)
@@ -75,7 +75,7 @@ def loader_shift(node, frame, relative=True):
class LoadMov(api.Loader):
    """Load mov file into Nuke"""

-    families = ["write", "source", "plate", "render"]
+    families = ["write", "source", "plate", "render", "review"]
    representations = ["mov", "preview", "review", "mp4"]

    label = "Load mov"

@@ -182,7 +182,6 @@ class LoadMov(api.Loader):
        """

        from avalon.nuke import (
-            ls_img_sequence,
            update_container
        )

@@ -190,8 +189,7 @@ class LoadMov(api.Loader):
        # TODO: prepare also for other Read img/geo/camera
        assert node.Class() == "Read", "Must be Read"

-        root = api.get_representation_path(representation)
-        file = ls_img_sequence(os.path.dirname(root), one=True)
+        file = api.get_representation_path(representation)

        # Get start frame from version data
        version = io.find_one({

@@ -238,7 +236,7 @@ class LoadMov(api.Loader):
        # Update the loader's path whilst preserving some values
        with preserve_trim(node):
            node["file"].setValue(file["path"])
-            log.info("__ node['file']: {}".format(node["file"]))
+            log.info("__ node['file']: {}".format(node["file"].value()))

        # Set the global in to the start frame of the sequence
        loader_shift(node, first, relative=True)
@@ -179,8 +179,8 @@ class LoadSequence(api.Loader):
        # TODO: prepare also for other Read img/geo/camera
        assert node.Class() == "Read", "Must be Read"

-        root = api.get_representation_path(representation)
-        file = ls_img_sequence(os.path.dirname(root), one=True)
+        path = api.get_representation_path(representation)
+        file = ls_img_sequence(path)

        # Get start frame from version data
        version = io.find_one({

@@ -222,7 +222,7 @@ class LoadSequence(api.Loader):
        # Update the loader's path whilst preserving some values
        with preserve_trim(node):
            node["file"].setValue(file["path"])
-            log.info("__ node['file']: {}".format(node["file"]))
+            log.info("__ node['file']: {}".format(node["file"].value()))

        # Set the global in to the start frame of the sequence
        loader_shift(node, first, relative=True)
@@ -3,7 +3,7 @@ import os
import nuke
import pyblish.api
from avalon import io, api
-from pype.nuke.lib import get_avalon_knob_data
+from avalon.nuke.lib import get_avalon_knob_data


@pyblish.api.log

@@ -34,6 +34,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

            # get data from avalon knob
            avalon_knob_data = get_avalon_knob_data(node)
+            self.log.debug("avalon_knob_data: {}".format(avalon_knob_data))
            if not avalon_knob_data:
                continue
@@ -11,5 +11,5 @@ class CollectWorkfileVersion(pyblish.api.ContextPlugin):

        project = context.data('activeProject')
        path = project.path()
-        context.data["version"] = pype.get_version_from_path(path)
+        context.data["version"] = int(pype.get_version_from_path(path))
        self.log.info("version: {}".format(context.data["version"]))

@@ -78,9 +78,8 @@ class CollectClips(api.ContextPlugin):
                "sourceFirst": source_first_frame,
                "sourceIn": int(item.sourceIn()),
                "sourceOut": int(item.sourceOut()),
-                "startFrame": int(item.timelineIn()),
-                "endFrame": int(item.timelineOut()),
-                "fps": float(item.sequence().framerate().toFloat())
+                "timelineIn": int(item.timelineIn()),
+                "timelineOut": int(item.timelineOut())
            }
        )

@@ -93,7 +92,7 @@ class CollectClips(api.ContextPlugin):
                "handles": 0,
                "handleStart": projectdata.get("handles", 0),
                "handleEnd": projectdata.get("handles", 0),
-                "version": version
+                "version": int(version)
            }
        )
        instance = context.create_instance(**data)

pype/plugins/nukestudio/publish/collect_frame_ranges.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+import pyblish.api
+
+
+class CollectClipFrameRanges(pyblish.api.InstancePlugin):
+    """Collect all frame range data: source(In,Out), timeline(In,Out),
+    edit_(in,out), f(start,end)."""
+
+    order = pyblish.api.CollectorOrder + 0.101
+    label = "Collect Frame Ranges"
+    hosts = ["nukestudio"]
+
+    def process(self, instance):
+
+        data = dict()
+
+        # Timeline data.
+        handle_start = instance.data["handleStart"]
+        handle_end = instance.data["handleEnd"]
+
+        source_in_h = instance.data["sourceIn"] - handle_start
+        source_out_h = instance.data["sourceOut"] + handle_end
+
+        timeline_in = instance.data["timelineIn"]
+        timeline_out = instance.data["timelineOut"]
+
+        timeline_in_h = timeline_in - handle_start
+        timeline_out_h = timeline_out + handle_end
+
+        # set frame start with tag or take it from timeline
+        frame_start = instance.data.get("frameStart")
+
+        if not frame_start:
+            frame_start = timeline_in
+
+        frame_end = frame_start + (timeline_out - timeline_in)
+
+        data.update(
+            {
+                "sourceInH": source_in_h,
+                "sourceOutH": source_out_h,
+                "startFrame": frame_start,
+                "endFrame": frame_end,
+                "timelineInH": timeline_in_h,
+                "timelineOutH": timeline_out_h,
+                "edit_in": timeline_in,
+                "edit_out": timeline_out
+            }
+        )
+        self.log.debug("__ data: {}".format(data))
+        instance.data.update(data)
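To make the handle arithmetic above concrete, a worked example with assumed clip values:

    # Assumed clip values from upstream collectors.
    handle_start, handle_end = 10, 10
    source_in, source_out = 0, 100
    timeline_in, timeline_out = 1001, 1101
    frame_start = None  # no frameStart tag on this clip

    source_in_h = source_in - handle_start        # -10
    source_out_h = source_out + handle_end        # 110
    timeline_in_h = timeline_in - handle_start    # 991
    timeline_out_h = timeline_out + handle_end    # 1111

    if not frame_start:
        frame_start = timeline_in                 # 1001
    frame_end = frame_start + (timeline_out - timeline_in)  # 1101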
@@ -9,4 +9,4 @@ class CollectFramerate(api.ContextPlugin):

    def process(self, context):
        sequence = context.data["activeSequence"]
-        context.data["framerate"] = sequence.framerate().toFloat()
+        context.data["fps"] = sequence.framerate().toFloat()
@@ -55,7 +55,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
        self.log.debug("__ tags: {}".format(tags))

        if not tags:
-            return
+            continue

        # loop through all tags
        for t in tags:

@@ -148,13 +148,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
        self.log.debug("__ assets_shared: {}".format(assets_shared))
        if assets_shared.get(asset):
            self.log.debug("Adding to shared assets: `{}`".format(
-                instance.data["name"]))
+                asset))
            asset_shared = assets_shared.get(asset)
        else:
            asset_shared = assets_shared[asset]

        asset_shared.update({
-            "asset": instance.data["asset"],
+            "asset": asset,
            "hierarchy": hierarchy,
            "parents": parents,
            "tasks": instance.data["tasks"]

@@ -220,7 +220,11 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
        # adding frame start if any on instance
        start_frame = s_asset_data.get("frameStart")
        if start_frame:
-            instance.data["frameStart"] = start_frame
+            instance.data["startFrame"] = start_frame
+            instance.data["endFrame"] = start_frame + (
+                instance.data["timelineOut"] -
+                instance.data["timelineIn"])

        self.log.debug(

@@ -249,14 +253,14 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
        # get custom attributes of the shot
        if instance.data.get("main"):
            in_info['custom_attributes'] = {
-                'handles': int(instance.data.get('handles')),
+                'handles': int(instance.data.get('handles', 0)),
                'handle_start': handle_start,
                'handle_end': handle_end,
-                'fstart': int(instance.data["startFrame"]),
-                'fend': int(instance.data["endFrame"]),
-                'fps': instance.data["fps"],
-                "edit_in": int(instance.data["startFrame"]),
-                "edit_out": int(instance.data["endFrame"])
+                'fstart': instance.data["startFrame"],
+                'fend': instance.data["endFrame"],
+                'fps': instance.context.data["fps"],
+                "edit_in": instance.data["timelineIn"],
+                "edit_out": instance.data["timelineOut"]
            }

        # adding SourceResolution if Tag was present

@@ -273,15 +277,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                "pixel_aspect": pixel_aspect
            })

-        start_frame = instance.data.get("frameStart")
-        if start_frame:
-            in_info['custom_attributes'].update({
-                'fstart': start_frame,
-                'fend': start_frame + (
-                    instance.data["endFrame"] -
-                    instance.data["startFrame"])
-            })
-
        in_info['tasks'] = instance.data['tasks']

        parents = instance.data.get('parents', [])
pype/plugins/nukestudio/publish/collect_leader_clip.py (new file, 24 lines)

@@ -0,0 +1,24 @@
+from pyblish import api
+
+
+class CollectLeaderClip(api.InstancePlugin):
+    """Collect the leader clip from the selected track items.
+
+    The clip with a hierarchy tag defines the sharable data attributes
+    (`handle_start/end`, `frame_start`, etc.) for the other clips with
+    `subset` tags.
+    """
+
+    order = api.CollectorOrder + 0.0111
+    label = "Collect Leader Clip"
+    hosts = ["nukestudio"]
+    families = ['clip']
+
+    def process(self, instance):
+        # gets tags
+        tags = instance.data["tags"]
+
+        for t in tags:
+            t_metadata = dict(t["metadata"])
+            t_type = t_metadata.get("tag.label", "")
+            self.log.info("`hierarchy`: `{}`".format(t_type))
+            # gets only task family tags and collect labels
+            if "hierarchy" in t_type.lower():
+                if not instance.data.get("main"):
+                    instance.data["main"] = True
+                    self.log.info("`Leader Clip` found in instance.name: "
+                                  "`{}`".format(instance.data["name"]))
@@ -57,43 +57,8 @@ class CollectPlates(api.InstancePlugin):
            data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[1]
        )

-        # # Timeline data.
-        # handle_start = int(instance.data["handleStart"] + data["handles"])
-        # handle_end = int(instance.data["handleEnd"] + data["handles"])
-        # Timeline data.
-        handle_start = int(instance.data["handleStart"])
-        handle_end = int(instance.data["handleEnd"])
-
-        source_in_h = data["sourceIn"] - handle_start
-        source_out_h = data["sourceOut"] + handle_end
-
-        timeline_in = int(data["item"].timelineIn())
-        timeline_out = int(data["item"].timelineOut())
-
-        timeline_frame_start = timeline_in - handle_start
-        timeline_frame_end = timeline_out + handle_end
-
-        frame_start = instance.data.get("frameStart", 1)
-        frame_end = frame_start + (data["sourceOut"] - data["sourceIn"])
-
-        data.update(
-            {
-                "sourceFirst": data["sourceFirst"],
-                "sourceIn": data["sourceIn"],
-                "sourceOut": data["sourceOut"],
-                "sourceInH": source_in_h,
-                "sourceOutH": source_out_h,
-                "frameStart": frame_start,
-                "startFrame": frame_start,
-                "endFrame": frame_end,
-                "timelineIn": timeline_in,
-                "timelineOut": timeline_out,
-                "timelineInHandles": timeline_frame_start,
-                "timelineOutHandles": timeline_frame_end,
-                "handleStart": handle_start,
-                "handleEnd": handle_end
-            }
-        )
        if "review" in instance.data["families"]:
            data["label"] += " - review"

        # adding SourceResolution if Tag was present
        if instance.data.get("sourceResolution") and instance.data.get("main"):

@@ -110,9 +75,6 @@ class CollectPlates(api.InstancePlugin):
        self.log.debug("Creating instance with name: {}".format(data["name"]))
        instance.context.create_instance(**data)

-        # # remove original instance
-        # instance.context.remove(instance)


class CollectPlatesData(api.InstancePlugin):
    """Collect plates"""

@@ -124,6 +86,12 @@ class CollectPlatesData(api.InstancePlugin):

    def process(self, instance):
        import os
+        if "review" in instance.data.get("track", ""):
+            self.log.debug(
+                "Skipping \"{}\" because its `review` track "
+                "\"plate\"".format(instance)
+            )
+            return

        # add to representations
        if not instance.data.get("representations"):

@@ -135,9 +103,6 @@ class CollectPlatesData(api.InstancePlugin):
        padding = int(anatomy.templates['render']['padding'])

        name = instance.data["subset"]
-        asset = instance.data["asset"]
-        track = instance.data["track"]
-        version = instance.data["version"]
        source_path = instance.data["sourcePath"]
        source_file = os.path.basename(source_path)

@@ -154,56 +119,20 @@ class CollectPlatesData(api.InstancePlugin):

        item = instance.data["item"]

-        # get handles
-        handle_start = int(instance.data["handleStart"])
-        handle_end = int(instance.data["handleEnd"])
-
-        # get source frames
-        source_in = int(instance.data["sourceIn"])
-        source_out = int(instance.data["sourceOut"])
-
-        # get source frames
-        frame_start = int(instance.data["startFrame"])
-        frame_end = int(instance.data["endFrame"])
-
-        # get source frames
-        source_in_h = int(instance.data["sourceInH"])
-        source_out_h = int(instance.data["sourceOutH"])
-
-        # get timeline frames
-        timeline_in = int(instance.data["timelineIn"])
-        timeline_out = int(instance.data["timelineOut"])
-
-        # frame-ranges with handles
-        timeline_frame_start = int(instance.data["timelineInHandles"])
-        timeline_frame_end = int(instance.data["timelineOutHandles"])
-
-        # get colorspace
-        colorspace = item.sourceMediaColourTransform()
-
-        # get sequence from context, and fps
-        fps = instance.data["fps"]
+        transfer_data = [
+            "handleStart", "handleEnd", "sourceIn", "sourceOut",
+            "startFrame", "endFrame", "sourceInH", "sourceOutH",
+            "timelineIn", "timelineOut", "timelineInH", "timelineOutH",
+            "asset", "track", "version"
+        ]
+
+        # pass data to version
+        version_data.update({k: instance.data[k] for k in transfer_data})

        # add to data of representation
        version_data.update({
-            "handles": handle_start,
-            "handleStart": handle_start,
-            "handleEnd": handle_end,
-            "sourceIn": source_in,
-            "sourceOut": source_out,
-            "startFrame": frame_start,
-            "endFrame": frame_end,
-            "timelineIn": timeline_in,
-            "timelineOut": timeline_out,
-            "timelineInHandles": timeline_frame_start,
-            "timelineOutHandles": timeline_frame_end,
-            "fps": fps,
-            "colorspace": colorspace,
+            "handles": version_data['handleStart'],
+            "colorspace": item.sourceMediaColourTransform(),
            "families": [f for f in families if 'ftrack' not in f],
-            "asset": asset,
-            "subset": name,
-            "track": track,
-            "version": int(version)
+            "fps": instance.context.data["fps"]
        })
        instance.data["versionData"] = version_data

@@ -220,10 +149,9 @@ class CollectPlatesData(api.InstancePlugin):
                padding=padding,
                ext=ext
            )
-            self.log.debug("__ source_in_h: {}".format(source_in_h))
-            self.log.debug("__ source_out_h: {}".format(source_out_h))
-            start_frame = source_first_frame + source_in_h
-            duration = source_out_h - source_in_h
+            start_frame = source_first_frame + instance.data["sourceInH"]
+            duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
            end_frame = start_frame + duration
            files = [file % i for i in range(start_frame, (end_frame + 1), 1)]
        except Exception as e:

@@ -231,8 +159,8 @@ class CollectPlatesData(api.InstancePlugin):
            head, ext = os.path.splitext(source_file)
            ext = ext[1:]
            files = source_file
-            start_frame = source_in_h
-            end_frame = source_out_h
+            start_frame = instance.data["sourceInH"]
+            end_frame = instance.data["sourceOutH"]

        mov_file = head + ".mov"
        mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))

@@ -243,9 +171,9 @@ class CollectPlatesData(api.InstancePlugin):
            'files': mov_file,
            'stagingDir': staging_dir,
            'startFrame': 0,
-            'endFrame': source_out - source_in + 1,
+            'endFrame': instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
            'step': 1,
-            'frameRate': fps,
+            'frameRate': instance.context.data["fps"],
            'preview': True,
            'thumbnail': False,
            'name': "preview",

@@ -258,8 +186,8 @@ class CollectPlatesData(api.InstancePlugin):

        thumb_file = head + ".png"
        thumb_path = os.path.join(staging_dir, thumb_file)
        self.log.debug("__ thumb_path: {}".format(thumb_path))
-        thumbnail = item.thumbnail(source_in).save(
+        thumbnail = item.thumbnail(instance.data["sourceIn"]).save(
            thumb_path,
            format='png'
        )

@@ -281,8 +209,8 @@ class CollectPlatesData(api.InstancePlugin):
            'stagingDir': staging_dir,
            'name': ext,
            'ext': ext,
-            'startFrame': frame_start - handle_start,
-            'endFrame': frame_end + handle_end,
+            'startFrame': instance.data["startFrame"] - instance.data["handleStart"],
+            'endFrame': instance.data["endFrame"] + instance.data["handleEnd"],
        }
        instance.data["representations"].append(plates_representation)
@@ -0,0 +1,17 @@
+from pyblish import api
+
+
+class CollectClipSubsets(api.InstancePlugin):
+    """Collect Subsets from selected Clips, Tags, Preset."""
+
+    order = api.CollectorOrder + 0.103
+    label = "Collect Remove Clip Instances"
+    hosts = ["nukestudio"]
+    families = ['clip']
+
+    def process(self, instance):
+        context = instance.context
+
+        # removing original instance
+        self.log.info("Removing instance.name: `{}`".format(instance.data["name"]))
+
+        context.remove(instance)
@@ -13,7 +13,7 @@ class CollectReviews(api.InstancePlugin):
"""

# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
hosts = ["nukestudio"]
families = ["clip"]
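Pyblish runs plugins in ascending `order`, so these fractional offsets pin
the NukeStudio collectors' relative sequence; a quick sketch with the values
from this commit:

    from pyblish import api

    orders = {
        "CollectShots": api.CollectorOrder + 0.1021,
        "CollectReviews": api.CollectorOrder + 0.1022,
        "CollectClipSubsets": api.CollectorOrder + 0.103,
    }
    for name, order in sorted(orders.items(), key=lambda kv: kv[1]):
        print(name, order)   # shots, then reviews, then subset removal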
@@ -41,30 +41,22 @@ class CollectReviews(api.InstancePlugin):
)
return

# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()

if track in instance.data["track"]:
self.log.debug("Track item on the track: {}".format(
instance.data["track"]))
# Collect data.
subset = ""
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("Review will work on `subset`: {}".format(
instance.data["subset"]))

data["family"] = family.lower()
data["ftrackFamily"] = "img"
data["families"] = ["ftrack"]
# change families
instance.data["family"] = "plate"
instance.data["families"] = ["review", "ftrack"]

data["subset"] = family.lower() + subset.title()
data["name"] = data["subset"] + "_" + data["asset"]
self.version_data(instance)
self.create_thumbnail(instance)

data["label"] = "{} - {}".format(
data['asset'], data["subset"]
)

data["source"] = data["sourcePath"]

# self.log.debug("Creating instance with data: {}".format(data))
instance.context.create_instance(**data)
rev_inst = instance

else:
self.log.debug("Track item on plateMain")
@@ -80,35 +72,89 @@ class CollectReviews(api.InstancePlugin):
"TrackItem from track name `{}` has to be also selected".format(
track)
)

# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()

self.log.debug("Instance review: {}".format(rev_inst.data["name"]))

# getting file path parameters
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
ext = os.path.splitext(file)[-1][1:]

# adding annotation to label
instance.data["label"] += " + review (.{})".format(ext)
instance.data["families"].append("review")
# adding representation for review mov
representation = {
"files": file,
"stagingDir": file_dir,
"startFrame": rev_inst.data.get("sourceIn"),
"endFrame": rev_inst.data.get("sourceOut"),
"step": 1,
"frameRate": rev_inst.data.get("fps"),
"preview": True,
"thumbnail": False,
"name": "preview",
"ext": ext
}
instance.data["representations"].append(representation)

self.log.debug("Added representation: {}".format(representation))
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
ext = os.path.splitext(file)[-1][1:]

# change label
instance.data["label"] = "{0} - {1} - ({2}) - review".format(
instance.data['asset'], instance.data["subset"], ext
)

self.log.debug("Instance review: {}".format(rev_inst.data["name"]))

# adding representation for review mov
representation = {
"files": file,
"stagingDir": file_dir,
"startFrame": rev_inst.data.get("sourceIn"),
"endFrame": rev_inst.data.get("sourceOut"),
"step": 1,
"frameRate": rev_inst.data.get("fps"),
"preview": True,
"thumbnail": False,
"name": "preview",
"ext": ext
}
instance.data["representations"].append(representation)

self.log.debug("Added representation: {}".format(representation))

def create_thumbnail(self, instance):
item = instance.data["item"]
source_in = instance.data["sourceIn"]

source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
head, ext = os.path.splitext(source_file)

# staging dir creation
staging_dir = os.path.dirname(
source_path)

thumb_file = head + ".png"
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
self.log.debug("__ source_in: {}".format(source_in))
thumbnail = item.thumbnail(source_in).save(
thumb_path,
format='png'
)
self.log.debug("__ thumbnail: {}".format(thumbnail))

thumb_representation = {
'files': thumb_file,
'stagingDir': staging_dir,
'name': "thumbnail",
'thumbnail': True,
'ext': "png"
}
instance.data["representations"].append(
thumb_representation)

def version_data(self, instance):
item = instance.data["item"]

transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"startFrame", "endFrame", "sourceInH", "sourceOutH",
"timelineIn", "timelineOut", "timelineInH", "timelineOutH",
"asset", "track", "version"
]

version_data = dict()
# pass data to version
version_data.update({k: instance.data[k] for k in transfer_data})

# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"families": instance.data["families"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data

instance.data["source"] = instance.data["sourcePath"]
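The version-data hand-off above copies a fixed key list verbatim and then
layers derived values on top, later keys winning; a small sketch with
assumed values:

    instance_data = {"handleStart": 10, "handleEnd": 10, "sourceIn": 0,
                     "sourceOut": 100, "asset": "shot010", "version": 3}
    transfer_data = ["handleStart", "handleEnd", "sourceIn", "sourceOut",
                     "asset", "version"]

    version_data = {k: instance_data[k] for k in transfer_data}
    # a missing key raises KeyError here, so the list must match what
    # earlier collectors stored on the instance
    version_data.update({"handles": version_data["handleStart"], "fps": 25})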
@@ -14,12 +14,12 @@ class CollectSelection(pyblish.api.ContextPlugin):

self.log.debug("selection: {}".format(selection))

if not selection:
self.log.debug(
"Nothing is selected. Collecting all items from sequence "
"\"{}\"".format(hiero.ui.activeSequence())
)
for track in hiero.ui.activeSequence().items():
selection.extend(track.items())
# if not selection:
# self.log.debug(
# "Nothing is selected. Collecting all items from sequence "
# "\"{}\"".format(hiero.ui.activeSequence())
# )
# for track in hiero.ui.activeSequence().items():
# selection.extend(track.items())

context.data["selection"] = selection
@@ -5,7 +5,7 @@ class CollectShots(api.ContextPlugin):
"""Collect Shot from Clip."""

# Run just before CollectClipSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Shots"
hosts = ["nukestudio"]
families = ["clip"]
@@ -25,55 +25,24 @@ class CollectShots(api.ContextPlugin):
)
continue

if instance.data.get("main"):
# Collect data.
data = {}
for key, value in instance.data.iteritems():
if key in "main":
continue
data[key] = value
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value

data["family"] = "shot"
data["families"] = []
data["frameStart"] = instance.data.get("frameStart", 1)
data["family"] = "shot"
data["families"] = []

data["subset"] = data["family"] + "Main"
data["subset"] = data["family"] + "Main"

data["name"] = data["subset"] + "_" + data["asset"]
data["name"] = data["subset"] + "_" + data["asset"]

data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)

# Get handles.
data["handleStart"] = instance.data["handleStart"]
data["handleEnd"] = instance.data["handleEnd"]

# Frame-ranges with handles.
data["sourceInH"] = data["sourceIn"] - data["handleStart"]
data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]

# Get timeline frames.
data["timelineIn"] = int(data["item"].timelineIn())
data["timelineOut"] = int(data["item"].timelineOut())

# Frame-ranges with handles.
data["timelineInHandles"] = data["timelineIn"]
data["timelineInHandles"] -= data["handleStart"]
data["timelineOutHandles"] = data["timelineOut"]
data["timelineOutHandles"] += data["handleEnd"]

# Creating comp frame range.
data["endFrame"] = (
data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
)

# Get fps.
sequence = instance.context.data["activeSequence"]
data["fps"] = sequence.framerate()

# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)

self.log.debug("_ context: {}".format(context[:]))
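The handle arithmetic above widens both the source and timeline ranges
symmetrically; a worked example with assumed numbers:

    handle_start, handle_end = 10, 10     # assumed handle sizes
    source_in, source_out = 1001, 1100    # assumed source range
    frame_start = 1                       # comp start frame

    source_in_h = source_in - handle_start               # 991
    source_out_h = source_out + handle_end               # 1110
    end_frame = frame_start + (source_out - source_in)   # 100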
@@ -1,208 +0,0 @@
from pyblish import api
from copy import deepcopy


class CollectClipSubsets(api.InstancePlugin):
"""Collect Subsets from selected Clips, Tags, Preset."""

order = api.CollectorOrder + 0.103
label = "Collect Subsets"
hosts = ["nukestudio"]
families = ['clip']

def process(self, instance):
context = instance.context

asset_name = instance.data["asset"]

# get all subsets from tags and match them with nks_presets >
# > looks to rules for tasks, subsets, representations
subsets_collection = self.get_subsets_from_presets(instance)

# iterate through subsets and create instances
for subset, attrs in subsets_collection.items():
self.log.info((subset, attrs))
# create families
item = instance.data["item"]
family = instance.data["family"]
families = attrs["families"] + [str(subset)]
task = attrs["task"]
subset = "{0}{1}".format(
subset,
instance.data.get("subsetType") or "Default")
instance_name = "{0}_{1}_{2}".format(asset_name, task, subset)
self.log.info("Creating instance with name: {}".format(
instance_name))

# get handles
handles = int(instance.data["handles"])
handle_start = int(instance.data["handleStart"] + handles)
handle_end = int(instance.data["handleEnd"] + handles)

# get source frames
source_first = int(instance.data["sourceFirst"])
source_in = int(instance.data["sourceIn"])
source_out = int(instance.data["sourceOut"])

# frame-ranges with handles
source_in_h = source_in - handle_start
source_out_h = source_out + handle_end

# get timeline frames
timeline_in = int(item.timelineIn())
timeline_out = int(item.timelineOut())

# frame-ranges with handles
timeline_frame_start = timeline_in - handle_start
timeline_frame_end = timeline_out + handle_end

# creating comp frame range
frame_start = instance.data["frameStart"]
frame_end = frame_start + (source_out - source_in)

# get sequence from context, and fps
sequence = context.data["activeSequence"]
fps = sequence.framerate()

context.create_instance(
name=instance_name,
subset=subset,
asset=asset_name,
track=instance.data.get("track"),
item=item,
task=task,
sourcePath=instance.data.get("sourcePath"),
family=family,
families=families,
sourceFirst=source_first,
sourceIn=source_in,
sourceOut=source_out,
sourceInH=source_in_h,
sourceOutH=source_out_h,
frameStart=frame_start,
startFrame=frame_start,
endFrame=frame_end,
timelineIn=timeline_in,
timelineOut=timeline_out,
timelineInHandles=timeline_frame_start,
timelineOutHandles=timeline_frame_end,
fps=fps,
handles=instance.data["handles"],
handleStart=handle_start,
handleEnd=handle_end,
attributes=attrs,
version=instance.data["version"],
hierarchy=instance.data.get("hierarchy", None),
parents=instance.data.get("parents", None),
publish=True
)

# removing original instance
context.remove(instance)

def get_subsets_from_presets(self, instance):

family = instance.data["family"]
# get presets and tags
tag_tasks = instance.data["tasks"]
presets = instance.context.data['presets']
nks_presets = presets[instance.context.data['host']]
family_default_preset = nks_presets["asset_default"].get(family)

if family_default_preset:
frame_start = family_default_preset.get("fstart", 1)
instance.data["frameStart"] = int(frame_start)

# get specific presets
pr_host_tasks = deepcopy(
nks_presets["rules_tasks"]).get("hostTasks", None)

subsets_collect = dict()
# iterate tags and collect subset properties from presets
for task in tag_tasks:
self.log.info("__ task: {}".format(task))
try:
# get host for task
host = None
host = [h for h, tasks in pr_host_tasks.items()
if task in tasks][0]
except IndexError:
pass

try:
# get subsets for task
subsets = None
#subsets = pr_host_subsets[host]
except KeyError:
pass

if not subsets:
continue

# get subsets for task
for sub in subsets:
# get specific presets
pr_subsets = deepcopy(nks_presets["rules_subsets"])
pr_representations = deepcopy(
nks_presets["rules_representations"])

# initialise collection dictionary
subs_data = dict()

# gets subset properties
subs_data[sub] = None
subs_data[sub] = pr_subsets.get(sub, None)

# gets representation if in keys
if subs_data[sub] and (
"representation" in subs_data[sub].keys()
):
repr_name = subs_data[sub]["representation"]

# overwrite representation key with values from preset
subs_data[sub]["representation"] = pr_representations[
repr_name
]
subs_data[sub]["representation"]["name"] = repr_name

# gets nodes and presets data if in keys
# gets nodes if any
if subs_data[sub] and (
"nodes" in subs_data[sub].keys()
):
# iterate through each node
for k in subs_data[sub]["nodes"]:
pr_node = k
pr_family = subs_data[sub]["nodes"][k]["family"]

# create attribute dict for later filling
subs_data[sub]["nodes"][k]["attributes"] = dict()

# iterate presets for the node
for p, path in subs_data[sub]["nodes"][k][
"presets"].items():

# adds node type and family for preset path
nPath = path + [pr_node, pr_family]

# create basic iternode to be walked through until
# presets are found at the end
iternode = presets[p]
for part in nPath:
iternode = iternode[part]

iternode = {k: v for k, v in iternode.items()
if not k.startswith("_")}
# adds found preset to attributes of the node
subs_data[sub]["nodes"][k][
"attributes"].update(iternode)

# removes preset key
subs_data[sub]["nodes"][k].pop("presets")

# add all into dictionary
self.log.info("__ subs_data[sub]: {}".format(subs_data[sub]))
subs_data[sub]["task"] = task.lower()
subsets_collect.update(subs_data)

return subsets_collect
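The deleted helper resolved node presets by walking a nested dict along a
stored path list; a minimal sketch of that lookup (the preset tree and names
are hypothetical):

    presets = {
        "nuke": {"dataflow": {"Write": {"render": {"file_type": "exr",
                                                   "_comment": "internal"}}}}
    }
    path = ["dataflow"]                    # path stored with the node preset
    n_path = path + ["Write", "render"]    # node class + family appended

    node = presets["nuke"]
    for part in n_path:
        node = node[part]                  # KeyError flags a missing preset

    attrs = {k: v for k, v in node.items() if not k.startswith("_")}
    # -> {'file_type': 'exr'}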
@@ -1,32 +0,0 @@
from pyblish import api


class CollectClipTagTypes(api.InstancePlugin):
"""Collect Types from Tags of selected track items."""

order = api.CollectorOrder + 0.012
label = "Collect main flag"
hosts = ["nukestudio"]
families = ['clip']

def process(self, instance):
# gets tags
tags = instance.data["tags"]

for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")

# gets only task family tags and collect labels
if "plate" in t_family:
t_subset = t_metadata.get("tag.subset", "")
subset_name = "{0}{1}".format(
t_family,
t_subset.capitalize())

if "plateMain" in subset_name:
if not instance.data.get("main"):
instance.data["main"] = True
self.log.info("`plateMain` found in instance.name: `{}`".format(
instance.data["name"]))
return
28
pype/plugins/nukestudio/publish/collect_tag_subsets.py
Normal file
@@ -0,0 +1,28 @@
from pyblish import api


class CollectClipSubsetsTags(api.InstancePlugin):
"""Collect Subsets from Tags of selected track items."""

order = api.CollectorOrder + 0.012
label = "Collect Tags Subsets"
hosts = ["nukestudio"]
families = ['clip']

def process(self, instance):
# gets tags
tags = instance.data["tags"]

for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", None)
t_subset = t_metadata.get("tag.subset", None)

# gets only task family tags and collect labels
if t_subset and t_family:
subset_name = "{0}{1}".format(
t_family,
t_subset.capitalize())
instance.data['subset'] = subset_name

self.log.info("`subset`: {0} found in `instance.name`: `{1}`".format(
subset_name, instance.data["name"]))
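How the tag metadata composes the subset name, with assumed tag values:

    t_family = "plate"      # from "tag.family"
    t_subset = "main"       # from "tag.subset"
    subset_name = "{0}{1}".format(t_family, t_subset.capitalize())
    # -> "plateMain"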
@@ -58,7 +58,9 @@ def __main__():
]

print("Pype command: {}".format(" ".join(args)))
exit_code = subprocess.call(args, shell=True)
# Forcing forwarding the environment because environment inheritance does
# not always work.
exit_code = subprocess.call(args, env=os.environ)
if exit_code != 0:
raise ValueError("Publishing failed.")
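The replacement call forwards the parent environment explicitly and drops
shell=True, in line with this branch's goal of unifying subprocess calls; a
minimal sketch of the same pattern (the command itself is illustrative):

    import os
    import subprocess

    args = ["python", "-c", "import os; print(sorted(os.environ)[:3])"]
    exit_code = subprocess.call(args, env=os.environ)
    if exit_code != 0:
        raise ValueError("Publishing failed.")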
@@ -28,7 +28,7 @@ def get_version_from_path(file):
v: version number in string ('001')

"""
pattern = re.compile(r"[\._]v([0-9]*)")
pattern = re.compile(r"v([0-9]*)")
try:
v = pattern.findall(file)[0]
return v
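The two patterns differ in what counts as a version token; a quick check of
both against sample names (file names illustrative):

    import re

    strict = re.compile(r"[\._]v([0-9]*)")  # requires "." or "_" before "v"
    loose = re.compile(r"v([0-9]*)")

    strict.findall("shot010_v003.nk")       # -> ['003']
    loose.findall("shot010_v003.nk")        # -> ['003']
    strict.findall("preview_render.nk")     # -> []
    loose.findall("preview_render.nk")      # -> [''] (bare "v" in "preview")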
54
setup/nuke/nuke_path/atom_server.py
Normal file
@@ -0,0 +1,54 @@
'''
Simple socket server using threads
'''

import socket
import sys
import threading
import StringIO
import contextlib

import nuke

HOST = ''
PORT = 8888


@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = StringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old


def _exec(data):
with stdoutIO() as s:
exec(data)
return s.getvalue()


def server_start():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)

while 1:
client, address = s.accept()
try:
data = client.recv(4096)
if data:
result = nuke.executeInMainThreadWithResult(_exec, args=(data,))
client.send(str(result))
except SystemExit:
result = 'SERVER: Shutting down...'
client.send(str(result))
raise
finally:
client.close()

t = threading.Thread(None, server_start)
t.setDaemon(True)
t.start()
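A hedged sketch of a matching client for this server: send Python source,
read back whatever the snippet printed (host and port taken from the file
above; Python 2 string sockets, matching the module's StringIO usage):

    import socket

    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(("localhost", 8888))
    client.send("print('hello from Nuke')")   # runs through _exec in Nuke
    print(client.recv(4096))                  # captured stdout comes back
    client.close()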
@@ -1,3 +1,4 @@
import atom_server

from pype.nuke.lib import (
writes_version_sync,
@@ -15,5 +16,6 @@ log = Logger().get_logger(__name__, "nuke")
nuke.addOnScriptSave(onScriptLoad)
nuke.addOnScriptLoad(checkInventoryVersions)
nuke.addOnScriptSave(checkInventoryVersions)
nuke.addOnScriptSave(writes_version_sync)

log.info('Automatic syncing of write file knob to script version')