Merged in feature/PYPE-338-publish-representations-for-sho (pull request #162)

feat(nukestudio): plates are collected from already encoded files

Approved-by: Milan Kolar <milan@orbi.tools>
This commit is contained in:
Jakub Jezek 2019-06-10 14:59:22 +00:00 committed by Milan Kolar
commit d61ba75375
23 changed files with 513 additions and 128 deletions

View file

@ -18,7 +18,7 @@ log = logging.getLogger(__name__)
def get_handle_irregular(asset):
    """Return per-side handle lengths stored on an asset document.

    Args:
        asset (dict): asset document with a "data" sub-dict that may
            carry "handle_start" / "handle_end" values.

    Returns:
        tuple: ``(handle_start, handle_end)``, each defaulting to 0 when
        the key is absent from ``asset["data"]``.
    """
    data = asset["data"]
    handle_start = data.get("handle_start", 0)
    # read from the nested "data" dict (not the asset root) so a missing
    # key falls back to 0 instead of never matching on the root document
    handle_end = data.get("handle_end", 0)
    return (handle_start, handle_end)

View file

@ -1,6 +1,5 @@
import os
import sys
import os
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
@ -196,14 +195,19 @@ def create_write_node(name, data):
except Exception as e:
log.error("problem with resolving anatomy tepmlate: {}".format(e))
fpath = str(anatomy_filled["render"]["path"]).replace("\\", "/")
# create directory
os.makedirs( os.path.dirname(fpath), 0766 )
_data = OrderedDict({
"file": str(anatomy_filled["render"]["path"]).replace("\\", "/")
"file": fpath
})
# adding dataflow template
{_data.update({k: v})
for k, v in nuke_dataflow_writes.items()
if k not in ["id", "previous"]}
if k not in ["_id", "_previous"]}
# adding dataflow template
{_data.update({k: v})
@ -351,7 +355,7 @@ def reset_frame_range_handles():
data = asset["data"]
missing_cols = []
check_cols = ["fstart", "fend"]
check_cols = ["fstart", "fend", "handle_start", "handle_end"]
for col in check_cols:
if col not in data:
@ -368,6 +372,10 @@ def reset_frame_range_handles():
handles = avalon.nuke.get_handles(asset)
handle_start, handle_end = pype.get_handle_irregular(asset)
log.info("__ handles: `{}`".format(handles))
log.info("__ handle_start: `{}`".format(handle_start))
log.info("__ handle_end: `{}`".format(handle_end))
edit_in = int(asset["data"]["fstart"]) - handles - handle_start
edit_out = int(asset["data"]["fend"]) + handles + handle_end

View file

@ -71,8 +71,11 @@ def reload_config():
import importlib
for module in (
"avalon",
"avalon.lib",
"avalon.pipeline",
"pyblish",
"pyblish_lite",
"pypeapp",
"{}.api".format(AVALON_CONFIG),
"{}.templates".format(AVALON_CONFIG),

View file

@ -95,26 +95,26 @@ def install():
'icon': QIcon('icons:Position.png')
},
"separator",
{
'action': QAction('Create...', None),
'function': creator.show,
'icon': QIcon('icons:ColorAdd.png')
},
{
'action': QAction('Load...', None),
'function': cbloader.show,
'icon': QIcon('icons:CopyRectangle.png')
},
# {
# 'action': QAction('Create...', None),
# 'function': creator.show,
# 'icon': QIcon('icons:ColorAdd.png')
# },
# {
# 'action': QAction('Load...', None),
# 'function': cbloader.show,
# 'icon': QIcon('icons:CopyRectangle.png')
# },
{
'action': QAction('Publish...', None),
'function': publish.show,
'icon': QIcon('icons:Output.png')
},
{
'action': QAction('Manage...', None),
'function': cbsceneinventory.show,
'icon': QIcon('icons:ModifyMetaData.png')
},
# {
# 'action': QAction('Manage...', None),
# 'function': cbsceneinventory.show,
# 'icon': QIcon('icons:ModifyMetaData.png')
# },
{
'action': QAction('Library...', None),
'function': libraryloader.show,

View file

@ -1,5 +1,6 @@
import os
import pyblish.api
import logging
try:
import ftrack_api_old as ftrack_api
@ -15,6 +16,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
def process(self, context):
ftrack_log = logging.getLogger('ftrack_api')
ftrack_log.setLevel(logging.WARNING)
# Collect session
session = ftrack_api.Session()
context.data["ftrackSession"] = session

View file

@ -26,7 +26,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
'render': 'render',
'nukescript': 'comp',
'review': 'mov',
'plates': 'img'
'plate': 'img'
}
def process(self, instance):

View file

@ -25,7 +25,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
}
"""
order = pyblish.api.IntegratorOrder
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["clip"]
optional = False
@ -82,7 +82,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
i for i in self.context[:] if i.data['asset'] in entity['name']]
for key in custom_attributes:
assert (key in entity['custom_attributes']), (
'Missing custom attribute')
'Missing custom attribute key: `{0}` in attrs: `{1}`'.format(key, entity['custom_attributes'].keys()))
entity['custom_attributes'][key] = custom_attributes[key]

View file

@ -9,7 +9,7 @@ class CollectAssumedDestination(pyblish.api.ContextPlugin):
label = "Collect Assumed Destination"
order = pyblish.api.CollectorOrder + 0.498
exclude_families = ["clip"]
exclude_families = ["plate"]
def process(self, context):

View file

@ -7,7 +7,8 @@ class CollectProjectData(pyblish.api.ContextPlugin):
"""Collecting project data from avalon db"""
label = "Collect Project Data"
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.1
hosts = ["nukestudio"]
def process(self, context):
# get project data from avalon db

View file

@ -14,6 +14,7 @@ class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
def process(self, context):
if "hierarchyContext" not in context.data:
self.log.info('skipping IntegrateHierarchyToAvalon')
return
self.db = io

View file

@ -60,8 +60,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"nukescript",
"render",
"write",
"plates",
"rig"
"rig",
"plate"
]
exclude_families = ["clip"]
@ -218,7 +218,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for idx, repre in enumerate(repres):
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
@ -238,7 +238,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template = os.path.normpath(
anatomy.templates[template_name]["path"])
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
self.log.debug(
@ -266,7 +265,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
repres[idx]['published_path'] = dst_collection.format()
repre['published_path'] = dst_collection.format()
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
@ -282,8 +281,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# for imagesequence version data
hashes = '#' * len(dst_padding)
dst = "{0}{1}{2}".format(dst_head, hashes, dst_tail)
dst = os.path.normpath("{0}{1}{2}".format(dst_head, hashes, dst_tail))
else:
# Single file
@ -306,10 +304,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
src = os.path.join(stagingdir, fname)
# src = fname
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled[template_name]["path"]
dst = os.path.normpath(
anatomy_filled[template_name]["path"])
instance.data["transfers"].append([src, dst])
repres[idx]['published_path'] = dst
repre['published_path'] = dst
self.log.debug("__ dst: {}".format(dst))
representation = {
"schema": "pype:representation-2.0",
@ -335,13 +336,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"representation": repre['ext']
}
}
self.log.debug("__ _representation: {}".format(representation))
destination_list.append(dst)
self.log.debug("__ destination_list: {}".format(destination_list))
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.debug("__ representations: {}".format(representations))
self.log.debug("__ representations: {}".format(representations))
for rep in instance.data["representations"]:
self.log.debug("__ represNAME: {}".format(rep['name']))
self.log.debug("__ represPATH: {}".format(rep['published_path']))
io.insert_many(representations)
self.log.debug("Representation: {}".format(representations))
# self.log.debug("Representation: {}".format(representations))
self.log.info("Registered {} items".format(len(representations)))

View file

@ -75,8 +75,8 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["write", "source", "plates"]
representations = ["exr", "dpx"]
families = ["write", "source", "plate"]
representations = ["exr", "dpx"]
label = "Load sequence"
order = -10
@ -94,6 +94,18 @@ class LoadSequence(api.Loader):
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
handles = version_data.get("handles", None)
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
handle_end = handles
# create handles offset
first -= handle_start
last += handle_end
# Fallback to asset name when namespace is None
if namespace is None:
@ -117,25 +129,23 @@ class LoadSequence(api.Loader):
if colorspace is not None:
r["colorspace"].setValue(str(colorspace))
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
if start is not None:
loader_shift(r, start, relative=True)
r["origfirst"].setValue(first)
r["first"].setValue(first)
r["origlast"].setValue(last)
r["last"].setValue(last)
loader_shift(r, first, relative=True)
r["origfirst"].setValue(first)
r["first"].setValue(first)
r["origlast"].setValue(last)
r["last"].setValue(last)
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handles",
"source", "colorspace", "author", "fps", "version"]
"source", "colorspace", "author", "fps", "version",
"handleStart", "handleEnd"]
data_imprint = {}
for k in add_keys:
if k is 'version':
data_imprint.update({k: context["version"]['name']})
else:
data_imprint.update({k: context["version"]['data'][k]})
data_imprint.update({k: context["version"]['data'].get(k, str(None))})
data_imprint.update({"objectName": read_name})
@ -186,12 +196,28 @@ class LoadSequence(api.Loader):
max_version = max(versions)
start = version["data"].get("startFrame")
if start is None:
version_data = version.get("data", {})
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
if first is None:
log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
start = 0
first = 0
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
handle_end = handles
# create handles offset
first -= handle_start
last += handle_end
# Update the loader's path whilst preserving some values
with preserve_trim(node):
@ -199,24 +225,26 @@ class LoadSequence(api.Loader):
log.info("__ node['file']: {}".format(node["file"]))
# Set the global in to the start frame of the sequence
global_in_changed = loader_shift(node, start, relative=False)
if global_in_changed:
# Log this change to the user
log.debug("Changed '{}' global in:"
" {:d}".format(node['name'].value(), start))
loader_shift(node, first, relative=True)
node["origfirst"].setValue(first)
node["first"].setValue(first)
node["origlast"].setValue(last)
node["last"].setValue(last)
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"startFrame": start,
"endFrame": version["data"].get("endFrame"),
"startFrame": version_data.get("startFrame"),
"endFrame": version_data.get("endFrame"),
"version": version.get("name"),
"colorspace": version["data"].get("colorspace"),
"source": version["data"].get("source"),
"handles": version["data"].get("handles"),
"fps": version["data"].get("fps"),
"author": version["data"].get("author"),
"outputDir": version["data"].get("outputDir"),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
"handles": version_data.get("handles"),
"handleStart": version_data.get("handleStart"),
"handleEnd": version_data.get("handleEnd"),
"fps": version_data.get("fps"),
"author": version_data.get("author"),
"outputDir": version_data.get("outputDir"),
})
# change color of node

View file

@ -8,18 +8,18 @@ class ExtractPlates(pype.api.Extractor):
order = api.ExtractorOrder
label = "Extract Plates"
hosts = ["nukestudio"]
families = ["plates"]
families = ["encode"]
def process(self, instance):
import os
import hiero.core
from hiero.ui.nuke_bridge import FnNsFrameServer
# from hiero.ui.nuke_bridge import FnNsFrameServer
# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()
repr_data = dict()
version_data = dict()
context = instance.context
anatomy = context.data.get("anatomy", None)
padding = int(anatomy.templates['render']['padding'])
@ -159,7 +159,7 @@ class ExtractPlates(pype.api.Extractor):
]
# add to data of representation
repr_data.update({
version_data.update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end,
@ -182,6 +182,7 @@ class ExtractPlates(pype.api.Extractor):
"track": track,
"version": int(version)
})
instance.data["versionData"] = version_data
# adding representation for nukescript
nk_representation = {
@ -189,7 +190,6 @@ class ExtractPlates(pype.api.Extractor):
'stagingDir': staging_dir,
'name': "nk",
'ext': "nk",
"data": repr_data
}
instance.data["representations"].append(nk_representation)
@ -200,7 +200,6 @@ class ExtractPlates(pype.api.Extractor):
'stagingDir': staging_dir,
'name': write_knobs["file_type"],
'ext': write_knobs["file_type"],
"data": repr_data
}
instance.data["representations"].append(plates_representation)
@ -227,13 +226,13 @@ class ExtractPlates(pype.api.Extractor):
family = instance.data["family"]
families = instance.data["families"]
# test prints repr_data
self.log.debug("__ repr_data: {}".format(repr_data))
# test prints version_data
self.log.debug("__ version_data: {}".format(version_data))
self.log.debug("__ nk_representation: {}".format(nk_representation))
self.log.debug("__ plates_representation: {}".format(
plates_representation))
self.log.debug("__ after family: {}".format(family))
self.log.debug("__ after families: {}".format(families))
# this will do FnNsFrameServer
FnNsFrameServer.renderFrames(*_args)
# # this will do FnNsFrameServer
# FnNsFrameServer.renderFrames(*_args)

View file

@ -9,7 +9,7 @@ class ExtractPlateCheck(api.ContextPlugin):
order = api.ExtractorOrder + 0.01
label = "Plates Export Waiting"
hosts = ["nukestudio"]
families = ["plates"]
families = ["encode"]
def process(self, context):

View file

@ -1,3 +1,4 @@
import os
from pyblish import api
@ -23,10 +24,24 @@ class CollectClips(api.ContextPlugin):
continue
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
instance_name = "{0}_{1}".format(track.name(), item.name())
try:
head, padding, ext = os.path.basename(source_path).split('.')
source_first_frame = int(padding)
except:
source_first_frame = 0
data[instance_name] = {
"item": item,
"source": source,
"sourcePath": source_path,
"track": track.name(),
"sourceFirst": source_first_frame,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"startFrame": int(item.timelineIn()),
"endFrame": int(item.timelineOut())
}
@ -37,8 +52,13 @@ class CollectClips(api.ContextPlugin):
name=key,
asset=value["item"].name(),
item=value["item"],
source=value["source"],
sourcePath=value["sourcePath"],
family=family,
families=[],
sourceFirst=value["sourceFirst"],
sourceIn=value["sourceIn"],
sourceOut=value["sourceOut"],
startFrame=value["startFrame"],
endFrame=value["endFrame"],
handles=projectdata['handles'],

View file

@ -4,7 +4,8 @@ import pype.api as pype
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder -0.05
order = pyblish.api.CollectorOrder - 0.1
def process(self, context):
"""Todo, inject the current working file"""

View file

@ -1,46 +1,44 @@
import json
from pyblish import api
class CollectClipHandles(api.InstancePlugin):
"""Collect Handles from selected track items."""
order = api.CollectorOrder + 0.006
class CollectClipHandles(api.ContextPlugin):
"""Collect Handles from all instances and add to assetShared."""
order = api.CollectorOrder + 0.1025
label = "Collect Handles"
hosts = ["nukestudio"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
def process(self, context):
assets_shared = context.data.get("assetsShared")
assert assets_shared, "Context data missing `assetsShared` key"
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# find all main types instances and add its handles to asset shared
instances = context[:]
for instance in instances:
# get handles
handles = int(instance.data["handles"])
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
# gets only task family tags and collect labels
if "handles" in t_family:
# gets value of handles
t_value = int(t_metadata.get("tag.value", ""))
if instance.data.get("main"):
name = instance.data["asset"]
if assets_shared.get(name):
self.log.debug("Adding to shared assets: `{}`".format(
instance.data["name"]))
assets_shared[name].update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end
})
# gets arguments if there are any
t_args = t_metadata.get("tag.args", "")
# distribute handles
if not t_args:
# add handles to both sides
instance.data['handles'] = t_value
self.log.info("Collected Handles: `{}`".format(
instance.data['handles']))
else:
t_args = json.loads(t_args.replace("'", "\""))
# add in start
if 'start' in t_args['where']:
instance.data["handleStart"] += t_value
self.log.info("Collected Handle Start: `{}`".format(
instance.data["handleStart"]))
# add in end
if 'end' in t_args['where']:
instance.data["handleEnd"] += t_value
self.log.info("Collected Handle End: `{}`".format(
instance.data["handleEnd"]))
for instance in instances:
if not instance.data.get("main"):
self.log.debug("Synchronize handles on: `{}`".format(
instance.data["name"]))
name = instance.data["asset"]
s_asset_data = assets_shared.get(name)
instance.data["handles"] = s_asset_data["handles"]
instance.data["handleStart"] = s_asset_data["handleStart"]
instance.data["handleEnd"] = s_asset_data["handleEnd"]

View file

@ -13,7 +13,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
"""
label = "Collect Hierarchy Clip"
order = pyblish.api.CollectorOrder + 0.1
order = pyblish.api.CollectorOrder + 0.101
families = ["clip"]
def convert_to_entity(self, key, value):
@ -38,8 +38,8 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
asset = instance.data.get("asset")
# create asset_names conversion table
if not context.data.get("assetsSharedHierarchy"):
context.data["assetsSharedHierarchy"] = dict()
if not context.data.get("assetsShared"):
context.data["assetsShared"] = dict()
# build data for inner nukestudio project property
data = {
@ -110,15 +110,18 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
# create new shot asset name
instance.data["asset"] = instance.data["asset"].format(
**d_metadata)
self.log.debug("__ instance.data[asset]: {}".format(instance.data["asset"]))
# lastly fill those individual properties into
# format the string with collected data
parents = [{"entityName": p["entityName"].format(
**d_metadata), "entityType": p["entityType"]}
for p in parents]
self.log.debug("__ parents: {}".format(parents))
hierarchy = template.format(
**d_metadata)
self.log.debug("__ hierarchy: {}".format(hierarchy))
# check if hierarchy attribute is already created
# it should not be so return warning if it is
@ -126,17 +129,18 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
assert not hd, "Only one Hierarchy Tag is \
allowed. Clip: `{}`".format(asset)
assetsSharedHierarchy = {
assetsShared = {
asset: {
"asset": instance.data["asset"],
"hierarchy": hierarchy,
"parents": parents
}}
self.log.debug("__ assetsShared: {}".format(assetsShared))
# add formated hierarchy path into instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
context.data["assetsSharedHierarchy"].update(
assetsSharedHierarchy)
context.data["assetsShared"].update(
assetsShared)
class CollectHierarchyContext(pyblish.api.ContextPlugin):
@ -145,7 +149,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''
label = "Collect Hierarchy Context"
order = pyblish.api.CollectorOrder + 0.101
order = pyblish.api.CollectorOrder + 0.102
def update_dict(self, ex_dict, new_dict):
for key in ex_dict:
@ -170,16 +174,37 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
name = instance.data["asset"]
# inject assetsSharedHierarchy to other plates types
assets_shared = context.data.get("assetsSharedHierarchy")
# get handles
handles = int(instance.data["handles"])
handle_start = int(instance.data["handleStart"] + handles)
handle_end = int(instance.data["handleEnd"] + handles)
# get source frames
source_first = int(instance.data["sourceFirst"])
source_in = int(instance.data["sourceIn"])
source_out = int(instance.data["sourceOut"])
instance.data['startFrame'] = int(
source_first + source_in - handle_start)
instance.data['endFrame'] = int(
(source_first + source_out + handle_end))
# inject assetsShared to other plates types
assets_shared = context.data.get("assetsShared")
if assets_shared:
s_asset_data = assets_shared.get(name)
if s_asset_data:
self.log.debug("__ s_asset_data: {}".format(s_asset_data))
name = instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
self.log.debug("__ instance.data[parents]: {}".format(instance.data["parents"]))
self.log.debug("__ instance.data[hierarchy]: {}".format(instance.data["hierarchy"]))
self.log.debug("__ instance.data[name]: {}".format(instance.data["name"]))
if "main" not in instance.data["name"].lower():
continue
in_info = {}
# suppose that all instances are Shots
@ -187,14 +212,29 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# get custom attributes of the shot
in_info['custom_attributes'] = {
'fend': instance.data['endFrame'],
'fstart': instance.data['startFrame'],
'handles': int(instance.data.get('handles')),
'fend': int(
(source_first + source_out)),
'fstart': int(
source_first + source_in),
'fps': context.data["framerate"]
}
handle_start = instance.data.get('handleStart')
handle_end = instance.data.get('handleEnd')
self.log.debug("__ handle_start: {}".format(handle_start))
self.log.debug("__ handle_end: {}".format(handle_end))
if handle_start and handle_end:
in_info['custom_attributes'].update({
"handle_start": handle_start,
"handle_end": handle_end
})
in_info['tasks'] = instance.data['tasks']
parents = instance.data.get('parents', [])
self.log.debug("__ in_info: {}".format(in_info))
actual = {name: in_info}

View file

@ -0,0 +1,210 @@
from pyblish import api
import pype
class CollectPlates(api.InstancePlugin):
    """Collect plate data from an already-encoded source clip.

    Reads timing, handle and source-file information that earlier
    NukeStudio collectors stored on the instance, publishes it as
    ``instance.data["versionData"]`` and appends file-sequence, preview
    mov and thumbnail representations for the integrator to pick up.
    """

    order = api.CollectorOrder + 0.49
    label = "Collect Plates"
    hosts = ["nukestudio"]
    families = ["plate"]

    def process(self, instance):
        """Build version data and representations for one plate instance.

        Args:
            instance: pyblish instance carrying clip data collected by
                earlier collectors (frame ranges, handles, source path,
                the hiero track item under the "item" key).
        """
        import os

        # make sure the representations list exists before we append to it
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        version_data = dict()
        context = instance.context
        anatomy = context.data.get("anatomy", None)
        # frame-number padding from the project anatomy templates
        # NOTE(review): this value is overwritten inside the try block
        # below and never used before that — confirm it is still needed
        padding = int(anatomy.templates['render']['padding'])

        # identification data collected on the instance
        name = instance.data["subset"]
        asset = instance.data["asset"]
        track = instance.data["track"]
        family = instance.data["family"]
        families = instance.data["families"]
        version = instance.data["version"]
        source_path = instance.data["sourcePath"]
        source_file = os.path.basename(source_path)

        # staging dir creation: files are published from where the
        # encoded source already lives
        staging_dir = os.path.dirname(
            source_path)

        # hiero track item backing this instance
        item = instance.data["item"]

        # get handles
        handles = int(instance.data["handles"])
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])

        # source cut in/out points (without handles)
        source_in = int(instance.data["sourceIn"])
        source_out = int(instance.data["sourceOut"])

        # comp frame range assigned to the shot
        frame_start = int(instance.data["startFrame"])
        frame_end = int(instance.data["endFrame"])

        # source cut in/out points including handles
        source_in_h = int(instance.data["sourceInH"])
        source_out_h = int(instance.data["sourceOutH"])

        # get timeline frames
        timeline_in = int(instance.data["timelineIn"])
        timeline_out = int(instance.data["timelineOut"])

        # frame-ranges with handles
        timeline_frame_start = int(instance.data["timelineInHandles"])
        timeline_frame_end = int(instance.data["timelineOutHandles"])

        # get colorspace from the hiero source media
        colorspace = item.sourceMediaColourTransform()

        # get sequence from context, and fps
        fps = float(str(instance.data["fps"]))

        # test output
        self.log.debug("__ handles: {}".format(handles))
        self.log.debug("__ handle_start: {}".format(handle_start))
        self.log.debug("__ handle_end: {}".format(handle_end))
        self.log.debug("__ frame_start: {}".format(frame_start))
        self.log.debug("__ frame_end: {}".format(frame_end))
        self.log.debug("__ f duration: {}".format(frame_end - frame_start + 1))
        self.log.debug("__ source_in: {}".format(source_in))
        self.log.debug("__ source_out: {}".format(source_out))
        self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
        self.log.debug("__ source_in_h: {}".format(source_in_h))
        self.log.debug("__ source_out_h: {}".format(source_out_h))
        self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
        self.log.debug("__ timeline_in: {}".format(timeline_in))
        self.log.debug("__ timeline_out: {}".format(timeline_out))
        self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
        self.log.debug("__ timeline_frame_start: {}".format(
            timeline_frame_start))
        self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
        self.log.debug("__ colorspace: {}".format(colorspace))
        self.log.debug("__ track: {}".format(track))
        self.log.debug("__ fps: {}".format(fps))
        self.log.debug("__ source_file: {}".format(source_file))
        self.log.debug("__ staging_dir: {}".format(staging_dir))
        self.log.debug("__ before family: {}".format(family))
        self.log.debug("__ before families: {}".format(families))

        #
        # this is just workaround because 'clip' family is filtered
        instance.data["family"] = families[-1]
        instance.data["families"].append(family)

        # add to data of representation
        version_data.update({
            "handles": handles,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "sourceIn": source_in,
            "sourceOut": source_out,
            "startFrame": frame_start,
            "endFrame": frame_end,
            "timelineIn": timeline_in,
            "timelineOut": timeline_out,
            "timelineInHandles": timeline_frame_start,
            "timelineOutHandles": timeline_frame_end,
            "fps": fps,
            "colorspace": colorspace,
            # ftrack-specific families are stripped from the version data
            "families": [f for f in families if 'ftrack' not in f],
            "asset": asset,
            "subset": name,
            "track": track,
            "version": int(version)
        })
        instance.data["versionData"] = version_data

        try:
            # assumes "<head>.<frame-number>.<ext>" file naming; anything
            # else raises and is treated as a single (non-sequence) file
            head, padding, ext = source_file.split('.')
            source_first_frame = int(padding)
            padding = len(padding)
            file = "{head}.%0{padding}d.{ext}".format(
                head=head,
                padding=padding,
                ext=ext
            )
            start_frame = source_first_frame
            end_frame = source_first_frame + source_out
            # expand one filename per frame, handles included
            files = [file % i for i in range(
                (source_first_frame + source_in_h),
                ((source_first_frame + source_out_h) + 1), 1)]
        except Exception as e:
            # single-file source (e.g. a movie): no sequence expansion
            self.log.debug("Exception in file: {}".format(e))
            head, ext = source_file.split('.')
            files = source_file
            start_frame = source_in_h
            end_frame = source_out_h

        # optional preview mov sitting next to the source files
        mov_file = head + ".mov"
        mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
        if os.path.exists(mov_path):
            # adding mov into the representations
            self.log.debug("__ mov_path: {}".format(mov_path))
            plates_mov_representation = {
                'files': mov_file,
                'stagingDir': staging_dir,
                'startFrame': 0,
                'endFrame': source_out - source_in + 1,
                'step': 1,
                'frameRate': fps,
                'preview': True,
                'thumbnail': False,
                'name': "preview",
                'ext': "mov",
            }
            instance.data["representations"].append(
                plates_mov_representation)

        # render a thumbnail from the clip's cut-in frame
        # NOTE(review): source text lost indentation — presumed to be
        # outside the mov `if` above since it does not depend on the mov;
        # confirm against the original file
        thumb_file = head + ".png"
        thumb_path = os.path.join(staging_dir, thumb_file)
        self.log.debug("__ thumb_path: {}".format(thumb_path))

        thumbnail = item.thumbnail(source_in).save(
            thumb_path,
            format='png'
        )
        self.log.debug("__ thumbnail: {}".format(thumbnail))

        thumb_representation = {
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }
        instance.data["representations"].append(
            thumb_representation)

        # adding representation for plates
        plates_representation = {
            'files': files,
            'stagingDir': staging_dir,
            'name': ext,
            'ext': ext,
            'startFrame': start_frame,
            'endFrame': end_frame,
        }
        instance.data["representations"].append(plates_representation)

        # testing families
        family = instance.data["family"]
        families = instance.data["families"]

        # test prints version_data
        self.log.debug("__ version_data: {}".format(version_data))
        self.log.debug("__ plates_representation: {}".format(
            plates_representation))
        self.log.debug("__ after family: {}".format(family))
        self.log.debug("__ after families: {}".format(families))

        # # this will do FnNsFrameServer
        # FnNsFrameServer.renderFrames(*_args)

View file

@ -5,7 +5,7 @@ import hiero
class CollectSequence(api.ContextPlugin):
"""Collect all Track items selection."""
order = api.CollectorOrder
order = api.CollectorOrder - 0.01
label = "Collect Sequence"
hosts = ["nukestudio"]

View file

@ -5,7 +5,7 @@ from copy import deepcopy
class CollectClipSubsets(api.InstancePlugin):
"""Collect Subsets from selected Clips, Tags, Preset."""
order = api.CollectorOrder + 0.102
order = api.CollectorOrder + 0.103
label = "Collect Subsets"
hosts = ["nukestudio"]
families = ['clip']
@ -39,6 +39,15 @@ class CollectClipSubsets(api.InstancePlugin):
handle_start = int(instance.data["handleStart"] + handles)
handle_end = int(instance.data["handleEnd"] + handles)
# get source frames
source_first = int(instance.data["sourceFirst"])
source_in = int(instance.data["sourceIn"])
source_out = int(instance.data["sourceOut"])
# frame-ranges with handles
source_in_h = source_in - handle_start
source_out_h = source_out + handle_end
# get timeline frames
timeline_in = int(item.timelineIn())
timeline_out = int(item.timelineOut())
@ -48,13 +57,12 @@ class CollectClipSubsets(api.InstancePlugin):
timeline_frame_end = timeline_out + handle_end
# creating comp frame range
frame_start = instance.data["frameStart"] - handle_start
frame_end = frame_start + \
(timeline_frame_end - timeline_frame_start)
frame_start = instance.data["frameStart"]
frame_end = frame_start + (source_out - source_in)
# get sequence from context, and fps
sequence = context.data["activeSequence"]
fps = int(str(sequence.framerate()))
fps = sequence.framerate()
context.create_instance(
name=instance_name,
@ -63,8 +71,14 @@ class CollectClipSubsets(api.InstancePlugin):
track=instance.data.get("track"),
item=item,
task=task,
sourcePath=instance.data.get("sourcePath"),
family=family,
families=families,
sourceFirst=source_first,
sourceIn=source_in,
sourceOut=source_out,
sourceInH=source_in_h,
sourceOutH=source_out_h,
frameStart=frame_start,
startFrame=frame_start,
endFrame=frame_end,
@ -108,6 +122,7 @@ class CollectClipSubsets(api.InstancePlugin):
subsets_collect = dict()
# iterate tags and collect subset properties from presets
for task in tag_tasks:
self.log.info("__ task: {}".format(task))
try:
# get host for task
host = None
@ -188,6 +203,7 @@ class CollectClipSubsets(api.InstancePlugin):
subs_data[sub]["nodes"][k].pop("presets")
# add all into dictionary
self.log.info("__ subs_data[sub]: {}".format(subs_data[sub]))
subs_data[sub]["task"] = task.lower()
subsets_collect.update(subs_data)

View file

@ -0,0 +1,47 @@
import json
from pyblish import api
class CollectClipTagHandles(api.InstancePlugin):
    """Collect handle lengths from "handles" tags on a track item.

    A tag without arguments sets the symmetric ``handles`` value; a tag
    with a JSON ``tag.args`` payload adds its value to ``handleStart``
    and/or ``handleEnd`` depending on the ``where`` field.
    """

    order = api.CollectorOrder + 0.012
    label = "Collect Tag Handles"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, instance):
        for tag in instance.data["tags"]:
            metadata = dict(tag["metadata"])

            # only tags whose family mentions handles are relevant here
            if "handles" not in metadata.get("tag.family", ""):
                continue

            # numeric handle length carried by the tag
            value = int(metadata.get("tag.value", ""))

            raw_args = metadata.get("tag.args", "")
            if not raw_args:
                # no arguments: value applies symmetrically to both sides
                instance.data['handles'] = value
                self.log.info("Collected Handles: `{}`".format(
                    instance.data['handles']))
                continue

            # args arrive with single quotes; normalize to valid JSON
            parsed = json.loads(raw_args.replace("'", "\""))
            where = parsed['where']
            if 'start' in where:
                instance.data["handleStart"] += value
                self.log.info("Collected Handle Start: `{}`".format(
                    instance.data["handleStart"]))
            if 'end' in where:
                instance.data["handleEnd"] += value
                self.log.info("Collected Handle End: `{}`".format(
                    instance.data["handleEnd"]))

View file

@ -26,6 +26,9 @@ class CollectClipTagTypes(api.InstancePlugin):
t_type.capitalize(), t_order)
subset_names.append(subset_type)
if "main" in t_type:
instance.data["main"] = True
if subset_names:
instance.data["subsetType"] = subset_names[0]