Merge branch 'feature/PYPE-488-nk-loading-nks-lut-soft-effects' into feature/PYPE-481-nuke-load-last-versions-of-subs

Commit: 64e4e7c2c9
Author: Jakub Jezek
Date: 2019-08-21 17:14:27 +02:00

32 changed files with 818 additions and 132 deletions


@@ -88,9 +88,11 @@ class FtrackModule:
     def set_action_server(self):
         try:
             self.action_server.run_server()
-        except Exception:
-            msg = 'Ftrack Action server crashed! Please try to start again.'
-            log.error(msg)
+        except Exception as exc:
+            log.error(
+                "Ftrack Action server crashed! Please try to start again.",
+                exc_info=True
+            )
             # TODO show message to user
             self.bool_action_server = False
             self.set_menu_visibility()
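
A minimal sketch of what the added `exc_info=True` gives over the removed `log.error(msg)` call, using the standard `logging` module (names here are illustrative):

import logging

log = logging.getLogger(__name__)

try:
    raise RuntimeError("simulated crash")
except Exception:
    # exc_info=True appends the full traceback to the log record,
    # unlike the removed plain log.error(msg) call
    log.error("Ftrack Action server crashed! Please try to start again.",
              exc_info=True)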


@@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "rig",
         "plate",
         "look",
+        "lut",
         "audio"
     ]

     exclude_families = ["clip"]


@@ -0,0 +1,317 @@
from avalon import api, style, io
import nuke
import json
from collections import OrderedDict


class LoadLuts(api.Loader):
    """Loading colorspace soft effect exported from nukestudio"""

    representations = ["lutJson"]
    families = ["lut"]

    label = "Load Luts - nodes"
    order = 0
    icon = "cc"
    color = style.colors.light

    def load(self, context, name, namespace, data):
        """
        Loading function to get the soft effects to particular read node

        Arguments:
            context (dict): context of version
            name (str): name of the version
            namespace (str): asset name
            data (dict): compulsory attribute > not used

        Returns:
            nuke node: containerised nuke node object
        """
        # import dependencies
        from avalon.nuke import containerise

        # get main variables
        version = context['version']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = namespace or context['asset']['name']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        # prepare data for imprinting
        # add additional metadata from the version to imprint to Avalon knob
        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # getting file path
        file = self.fname.replace("\\", "/")

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to node graph
        # just in case we are in group lets jump out of it
        nuke.endGroup()

        GN = nuke.createNode("Group")
        GN["name"].setValue(object_name)

        # adding content to the group node
        with GN:
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 4:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to find parent read node
        self.connect_read_node(GN, namespace, json_f["assignTo"])

        GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))

        return containerise(
            node=GN,
            name=name,
            namespace=namespace,
            context=context,
            loader=self.__class__.__name__,
            data=data_imprint)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs.
        """
        from avalon.nuke import (
            update_container
        )

        # get main variables
        # Get version from io
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

        # get corresponding node
        GN = nuke.toNode(container['objectName'])

        file = api.get_representation_path(representation).replace("\\", "/")
        name = container['name']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = container['namespace']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"representation": str(representation["_id"]),
                        "frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # Update the imprinted representation
        update_container(
            GN,
            data_imprint
        )

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to node graph
        # just in case we are in group lets jump out of it
        nuke.endGroup()

        # adding content to the group node
        with GN:
            # first remove all nodes
            [nuke.delete(n) for n in nuke.allNodes()]

            # create input node
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 3:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            # create output node
            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to find parent read node
        self.connect_read_node(GN, namespace, json_f["assignTo"])

        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # change color of node
        if version.get("name") not in [max_version]:
            GN["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("updated to version: {}".format(version.get("name")))

    def connect_read_node(self, group_node, asset, subset):
        """
        Finds read node and selects it

        Arguments:
            asset (str): asset name

        Returns:
            nuke node: node is selected
            None: if nothing found
        """
        search_name = "{0}_{1}".format(asset, subset)
        node = [n for n in nuke.allNodes()
                if search_name in n["name"].value()]
        if len(node) > 0:
            rn = node[0]
        else:
            # no matching read node was found
            return None

        # Parent read node has been found
        # solving connections
        dep_nodes = rn.dependent()

        if len(dep_nodes) > 0:
            for dn in dep_nodes:
                dn.setInput(0, group_node)

        group_node.setInput(0, rn)
        group_node.autoplace()

    def reorder_nodes(self, data):
        new_order = OrderedDict()
        trackNums = [v["trackIndex"] for k, v in data.items()]
        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]

        for trackIndex in range(
                min(trackNums), max(trackNums) + 1):
            for subTrackIndex in range(
                    min(subTrackNums), max(subTrackNums) + 1):
                item = self.get_item(data, trackIndex, subTrackIndex)
                # an empty dict is falsy; `item is not {}` would always
                # be True, so test for emptiness directly
                if item:
                    new_order.update(item)
        return new_order

    def get_item(self, data, trackIndex, subTrackIndex):
        return {key: val for key, val in data.items()
                if subTrackIndex == val["subTrackIndex"]
                if trackIndex == val["trackIndex"]}

    def byteify(self, input):
        """
        Converts unicode strings to bytestrings

        It walks through the whole dictionary recursively.

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys
        """
        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.iteritems()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from avalon.nuke import viewer_update_and_undo_stop

        node = nuke.toNode(container['objectName'])
        with viewer_update_and_undo_stop():
            nuke.delete(node)
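
For reference, a minimal sketch of the `lutJson` payload this loader consumes, using only key names that appear in the code above (`assignTo`, `effects`, `class`, `trackIndex`, `subTrackIndex`, `node`); the concrete values are hypothetical:

# hypothetical lutJson payload for LoadLuts.load()
lut_json = {
    "assignTo": "plateMain",      # subset name of the read node to attach to
    "effects": {
        "Grade1": {
            "class": "Grade",     # node class created inside the group
            "trackIndex": 1,
            "subTrackIndex": 0,
            "node": {
                "white": [1.2, 1.2, 1.2, 1.0]  # knob values; longer lists
                                               # are treated as animation
            }
        }
    }
}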


@@ -96,6 +96,8 @@ class LoadSequence(api.Loader):
        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
@@ -104,6 +106,9 @@ class LoadSequence(api.Loader):
         if namespace is None:
             namespace = context['asset']['name']

+        first -= self.handle_start
+        last += self.handle_end
+
         file = self.fname.replace("\\", "/")
         log.info("file: {}\n".format(self.fname))
@@ -231,6 +236,7 @@ class LoadSequence(api.Loader):
        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
@@ -241,6 +247,9 @@ class LoadSequence(api.Loader):
                 "{} ({})".format(node['name'].value(), representation))
             first = 0

+        first -= self.handle_start
+        last += self.handle_end
+
         # Update the loader's path whilst preserving some values
         with preserve_trim(node):
             node["file"].setValue(file["path"])


@@ -20,83 +20,114 @@ class CollectClips(api.ContextPlugin):
         projectdata = context.data["projectData"]
         version = context.data.get("version", "001")

-        instances_data = []
-        for item in context.data.get("selection", []):
-            # Skip audio track items
-            # Try/Except is to handle items types, like EffectTrackItem
-            try:
-                media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
-                if str(item.mediaType()) != media_type:
-                    continue
-            except:
-                continue
-
-            track = item.parent()
-            source = item.source().mediaSource()
-            source_path = source.firstpath()
-
-            # If source is *.nk its a comp effect and we need to fetch the
-            # write node output. This should be improved by parsing the script
-            # rather than opening it.
-            if source_path.endswith(".nk"):
-                nuke.scriptOpen(source_path)
-                # There should noly be one.
-                write_node = nuke.allNodes(filter="Write")[0]
-                path = nuke.filename(write_node)
-
-                if "%" in path:
-                    # Get start frame from Nuke script and use the item source
-                    # in/out, because you can have multiple shots covered with
-                    # one nuke script.
-                    start_frame = int(nuke.root()["first_frame"].getValue())
-                    if write_node["use_limit"].getValue():
-                        start_frame = int(write_node["first"].getValue())
-
-                    path = path % (start_frame + item.sourceIn())
-
-                source_path = path
-                self.log.debug(
-                    "Fetched source path \"{}\" from \"{}\" in "
-                    "\"{}\".".format(
-                        source_path, write_node.name(), source.firstpath()
-                    )
-                )
-
-            try:
-                head, padding, ext = os.path.basename(source_path).split(".")
-                source_first_frame = int(padding)
-            except:
-                source_first_frame = 0
-
-            instances_data.append(
-                {
-                    "name": "{0}_{1}".format(track.name(), item.name()),
-                    "item": item,
-                    "source": source,
-                    "sourcePath": source_path,
-                    "track": track.name(),
-                    "sourceFirst": source_first_frame,
-                    "sourceIn": int(item.sourceIn()),
-                    "sourceOut": int(item.sourceOut()),
-                    "clipIn": int(item.timelineIn()),
-                    "clipOut": int(item.timelineOut())
-                }
-            )
-
-        for data in instances_data:
-            data.update(
-                {
-                    "asset": data["item"].name(),
-                    "family": "clip",
-                    "families": [],
-                    "handles": 0,
-                    "handleStart": projectdata.get("handles", 0),
-                    "handleEnd": projectdata.get("handles", 0),
-                    "version": int(version)
-                }
-            )
-            instance = context.create_instance(**data)
-            self.log.debug(
-                "Created instance with data: {}".format(instance.data)
-            )
-            context.data["assetsShared"][data["asset"]] = dict()
+        sequence = context.data.get("activeSequence")
+        selection = context.data.get("selection")
+
+        track_effects = dict()
+
+        # collect all trackItems as instances
+        for track_index, video_track in enumerate(sequence.videoTracks()):
+            items = video_track.items()
+            sub_items = video_track.subTrackItems()
+
+            for item in items:
+                # compare with selection or if disabled
+                if item not in selection or not item.isEnabled():
+                    continue
+
+                # Skip audio track items
+                # Try/Except is to handle items types, like EffectTrackItem
+                try:
+                    media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
+                    if str(item.mediaType()) != media_type:
+                        continue
+                except Exception:
+                    continue
+
+                asset = item.name()
+                track = item.parent()
+                source = item.source().mediaSource()
+                source_path = source.firstpath()
+                effects = [f for f in item.linkedItems() if f.isEnabled()]
+
+                # If source is *.nk its a comp effect and we need to fetch
+                # the write node output. This should be improved by parsing
+                # the script rather than opening it.
+                if source_path.endswith(".nk"):
+                    nuke.scriptOpen(source_path)
+                    # There should only be one.
+                    write_node = nuke.allNodes(filter="Write")[0]
+                    path = nuke.filename(write_node)
+
+                    if "%" in path:
+                        # Get start frame from Nuke script and use the item
+                        # source in/out, because you can have multiple shots
+                        # covered with one nuke script.
+                        start_frame = int(
+                            nuke.root()["first_frame"].getValue())
+                        if write_node["use_limit"].getValue():
+                            start_frame = int(write_node["first"].getValue())
+
+                        path = path % (start_frame + item.sourceIn())
+
+                    source_path = path
+                    self.log.debug(
+                        "Fetched source path \"{}\" from \"{}\" in "
+                        "\"{}\".".format(
+                            source_path, write_node.name(), source.firstpath()
+                        )
+                    )
+
+                try:
+                    head, padding, ext = os.path.basename(
+                        source_path).split(".")
+                    source_first_frame = int(padding)
+                except Exception:
+                    source_first_frame = 0
+
+                data = {"name": "{0}_{1}".format(track.name(), item.name()),
+                        "item": item,
+                        "source": source,
+                        "sourcePath": source_path,
+                        "track": track.name(),
+                        "trackIndex": track_index,
+                        "sourceFirst": source_first_frame,
+                        "effects": effects,
+                        "sourceIn": int(item.sourceIn()),
+                        "sourceOut": int(item.sourceOut()),
+                        "clipIn": int(item.timelineIn()),
+                        "clipOut": int(item.timelineOut()),
+                        "asset": asset,
+                        "family": "clip",
+                        "families": [],
+                        "handles": 0,
+                        "handleStart": projectdata.get("handles", 0),
+                        "handleEnd": projectdata.get("handles", 0),
+                        "version": int(version)}
+
+                instance = context.create_instance(**data)
+                self.log.info("Created instance: {}".format(instance))
+                self.log.debug(">> effects: {}".format(
+                    instance.data["effects"]))
+                context.data["assetsShared"][asset] = dict()
+
+            # from now we are collecting only subtrackitems on
+            # track with no video items
+            if len(items) > 0:
+                continue
+
+            # create list in track key
+            # get all subTrackItems and add it to context
+            track_effects[track_index] = list()
+
+            # collect all subtrack items
+            for sitem in sub_items:
+                # unwrap from tuple >> it is always tuple with one item
+                sitem = sitem[0]
+                # checking if not enabled
+                if not sitem.isEnabled():
+                    continue
+
+                track_effects[track_index].append(sitem)
+
+        context.data["trackEffects"] = track_effects
+        self.log.debug(">> sub_track_items: `{}`".format(track_effects))


@@ -0,0 +1,95 @@
import pyblish.api
import re


class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
    """Collect video tracks effects into context."""
    order = pyblish.api.CollectorOrder + 0.1015
    label = "Collect Soft Lut Effects"
    families = ["clip"]

    def process(self, instance):
        self.log.debug("Finding soft effect for subset: `{}`".format(
            instance.data.get("subset")))

        # taking active sequence
        subset = instance.data["subset"]
        track_effects = instance.context.data.get("trackEffects", {})
        track_index = instance.data["trackIndex"]
        effects = instance.data["effects"]

        # creating context attribute
        self.effects = {"assignTo": subset, "effects": dict()}

        for sitem in effects:
            self.add_effect(instance, track_index, sitem)

        for t_index, sitems in track_effects.items():
            for sitem in sitems:
                if not t_index > track_index:
                    continue
                self.log.debug(">> sitem: `{}`".format(sitem))
                self.add_effect(instance, t_index, sitem)

        instance.data["effectTrackItems"] = self.effects

        if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
            instance.data["families"] += ["lut"]
            self.log.debug("effects.keys: {}".format(
                instance.data.get("effectTrackItems", {}).keys()))
            self.log.debug("effects: {}".format(
                instance.data.get("effectTrackItems", {})))

    def add_effect(self, instance, track_index, item):
        track = item.parentTrack().name()
        # node serialization
        node = item.node()
        node_serialized = self.node_serialisation(instance, node)

        # collect timelineIn/Out
        effect_t_in = int(item.timelineIn())
        effect_t_out = int(item.timelineOut())

        node_name = item.name()
        node_class = re.sub(r"\d+", "", node_name)

        self.effects["effects"].update({node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": item.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }})

    def node_serialisation(self, instance, node):
        node_serialized = {}
        timeline_in_h = instance.data["clipInH"]
        timeline_out_h = instance.data["clipOutH"]

        # knob keys to be ignored
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version']

        # loop through all knobs and collect the ones that are not
        # ignored, with whatever value they hold
        for knob in node.knobs().keys():
            # skip knobs in ignore keys
            if knob in _ignoring_keys:
                continue

            # get animation if knob is animated
            if node[knob].isAnimated():
                # grab animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(timeline_in_h, timeline_out_h + 1)]
                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        return node_serialized
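
A sketch of what `node_serialisation` returns for a single soft effect (values hypothetical): static knobs keep their plain value, animated knobs become one sampled value per frame between `clipInH` and `clipOutH`:

# hypothetical serialization of a Grade soft effect
node_serialized = {
    "white": 1.2,                         # static knob -> plain value
    "multiply": [1.0, 1.01, 1.02, 1.03],  # animated knob -> per-frame samples
}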


@@ -38,6 +38,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
         tags = instance.data.get("tags", None)
         clip = instance.data["item"]
         asset = instance.data.get("asset")
+        sequence = context.data['activeSequence']
+        width = int(sequence.format().width())
+        height = int(sequence.format().height())
+        pixel_aspect = sequence.format().pixelAspect()

         # build data for inner nukestudio project property
         data = {
@@ -157,6 +161,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
             "asset": asset,
             "hierarchy": hierarchy,
             "parents": parents,
+            "width": width,
+            "height": height,
+            "pixelAspect": pixel_aspect,
             "tasks": instance.data["tasks"]
         })
@@ -191,7 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
    def process(self, context):
        instances = context[:]
        sequence = context.data['activeSequence']

        # create hierarchyContext attr if context has none
        temp_context = {}
@@ -216,6 +223,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                 instance.data["parents"] = s_asset_data["parents"]
                 instance.data["hierarchy"] = s_asset_data["hierarchy"]
                 instance.data["tasks"] = s_asset_data["tasks"]
+                instance.data["width"] = s_asset_data["width"]
+                instance.data["height"] = s_asset_data["height"]
+                instance.data["pixelAspect"] = s_asset_data["pixelAspect"]

                 # adding frame start if any on instance
                 start_frame = s_asset_data.get("startingFrame")
@@ -265,16 +275,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
             # adding SourceResolution if Tag was present
             if instance.data.get("main"):
-                width = int(sequence.format().width())
-                height = int(sequence.format().height())
-                pixel_aspect = sequence.format().pixelAspect()
-                self.log.info(
-                    "Sequence Width,Height,PixelAspect are: "
-                    "`{0},{1},{2}`".format(width, height, pixel_aspect))
-
                 in_info['custom_attributes'].update({
-                    "resolutionWidth": width,
-                    "resolutionHeight": height,
-                    "pixelAspect": pixel_aspect
+                    "resolutionWidth": instance.data["width"],
+                    "resolutionHeight": instance.data["height"],
+                    "pixelAspect": instance.data["pixelAspect"]
                 })

                 in_info['tasks'] = instance.data['tasks']


@@ -66,11 +66,14 @@ class CollectPlates(api.InstancePlugin):
         item = instance.data["item"]

         width = int(item.source().mediaSource().width())
         height = int(item.source().mediaSource().height())
-        self.log.info("Source Width and Height are: `{0} x {1}`".format(
-            width, height))
+        pixel_aspect = int(item.source().mediaSource().pixelAspect())
+        self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
+            width, height, pixel_aspect))

         data.update({
             "width": width,
-            "height": height
+            "height": height,
+            "pixelAspect": pixel_aspect
         })

         self.log.debug("Creating instance with name: {}".format(data["name"]))
@@ -123,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
         transfer_data = [
             "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
             "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
-            "clipInH", "clipOutH", "asset", "track", "version"
+            "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
         ]
# pass data to version
@@ -133,6 +136,7 @@ class CollectPlatesData(api.InstancePlugin):
         version_data.update({
             "handles": version_data['handleStart'],
             "colorspace": item.sourceMediaColourTransform(),
+            "colorspaceScript": instance.context.data["colorspace"],
             "families": [f for f in families if 'ftrack' not in f],
             "subset": name,
             "fps": instance.context.data["fps"]


@@ -14,12 +14,4 @@ class CollectSelection(pyblish.api.ContextPlugin):
         self.log.debug("selection: {}".format(selection))

-        # if not selection:
-        #     self.log.debug(
-        #         "Nothing is selected. Collecting all items from sequence "
-        #         "\"{}\"".format(hiero.ui.activeSequence())
-        #     )
-        #     for track in hiero.ui.activeSequence().items():
-        #         selection.extend(track.items())
-
         context.data["selection"] = selection


@@ -1,7 +1,7 @@
 from pyblish import api


-class CollectShots(api.ContextPlugin):
+class CollectShots(api.InstancePlugin):
     """Collect Shot from Clip."""

     # Run just before CollectClipSubsets
@@ -10,39 +10,39 @@ class CollectShots(api.InstancePlugin):
     hosts = ["nukestudio"]
     families = ["clip"]

-    def process(self, context):
-        for instance in context[:]:
-            # Exclude non-tagged instances.
-            tagged = False
-            for tag in instance.data["tags"]:
-                if tag["name"].lower() == "hierarchy":
-                    tagged = True
-
-            if not tagged:
-                self.log.debug(
-                    "Skipping \"{}\" because its not tagged with "
-                    "\"Hierarchy\"".format(instance))
-                continue
-
-            # Collect data.
-            data = {}
-            for key, value in instance.data.iteritems():
-                data[key] = value
-
-            data["family"] = "shot"
-            data["families"] = []
-            data["subset"] = data["family"] + "Main"
-            data["name"] = data["subset"] + "_" + data["asset"]
-            data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
-                data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
-            )
-
-            # Create instance.
-            self.log.debug("Creating instance with: {}".format(data["name"]))
-            instance.context.create_instance(**data)
+    def process(self, instance):
+        # Exclude non-tagged instances.
+        tagged = False
+        for tag in instance.data["tags"]:
+            if tag["name"].lower() == "hierarchy":
+                tagged = True
+
+        if not tagged:
+            self.log.debug(
+                "Skipping \"{}\" because its not tagged with "
+                "\"Hierarchy\"".format(instance)
+            )
+            return
+
+        # Collect data.
+        data = {}
+        for key, value in instance.data.iteritems():
+            data[key] = value
+
+        self.log.debug("_ context: {}".format(instance.context[:]))
+
+        data["family"] = "shot"
+        data["families"] = []
+        data["subset"] = data["family"] + "Main"
+        data["name"] = data["subset"] + "_" + data["asset"]
+        data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
+            data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
+        )
+
+        # Create instance.
+        self.log.debug("Creating instance with: {}".format(data["name"]))
+        instance.context.create_instance(**data)
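
For context on the switch above: pyblish runs a `ContextPlugin.process` once with the whole context, while `InstancePlugin.process` runs once per instance matching `families`, which is why the explicit loop and `continue` become a plain body with `return`. A minimal sketch:

import pyblish.api


class PerContext(pyblish.api.ContextPlugin):
    def process(self, context):
        # called once; the plugin iterates instances itself
        for instance in context:
            self.log.info("visiting: {}".format(instance))


class PerInstance(pyblish.api.InstancePlugin):
    families = ["clip"]

    def process(self, instance):
        # called once for every instance whose families match
        self.log.info("visiting: {}".format(instance))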


@@ -0,0 +1,231 @@
# from pype import plugins
import os
import json
import re
import pyblish.api
import tempfile
from avalon import io, api


class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
    """Export video track soft effects (luts) into a json file."""

    order = pyblish.api.ExtractorOrder
    label = "Export Soft Lut Effects"
    families = ["lut"]

    def process(self, instance):
        item = instance.data["item"]

        effects = instance.data.get("effectTrackItems")

        instance.data["families"] = [f for f in instance.data.get(
            "families", []) if f not in ["lut"]]

        self.log.debug("___ instance.data[families]: `{}`".format(
            instance.data["families"]))

        # skip any without effects
        if not effects:
            return

        subset = instance.data.get("subset")
        subset_split = re.findall(r'[A-Z][^A-Z]*', subset)

        if len(subset_split) > 0:
            root_name = subset.replace(subset_split[0], "")
            subset_split.insert(0, root_name.capitalize())

        subset_split.insert(0, "lut")

        self.log.debug("creating staging dir")
        # staging_dir = self.staging_dir(instance)
        # TODO: only provisory, will be replaced by a function
        staging_dir = instance.data.get('stagingDir', None)

        if not staging_dir:
            staging_dir = os.path.normpath(
                tempfile.mkdtemp(prefix="pyblish_tmp_")
            )
            instance.data['stagingDir'] = staging_dir

        self.log.debug("creating staging dir: `{}`".format(staging_dir))

        transfers = list()
        if "transfers" not in instance.data:
            instance.data["transfers"] = list()

        name = "".join(subset_split)
        ext = "json"
        file = name + "." + ext

        # create new instance and inherit data
        data = {}
        for key, value in instance.data.iteritems():
            data[key] = value

        # change names
        data["subset"] = name
        data["family"] = "lut"
        data["families"] = []
        data["name"] = data["subset"] + "_" + data["asset"]
        data["label"] = "{} - {} - ({})".format(
            data['asset'], data["subset"], os.path.splitext(file)[1]
        )
        data["source"] = data["sourcePath"]

        # create new instance
        instance = instance.context.create_instance(**data)

        dst_dir = self.resource_destination_dir(instance)

        # change paths in effects to files
        for k, effect in effects["effects"].items():
            trn = self.copy_linked_files(effect, dst_dir)
            if trn:
                transfers.append((trn[0], trn[1]))

        instance.data["transfers"].extend(transfers)
        self.log.debug("_ transfers: `{}`".format(
            instance.data["transfers"]))

        # create representations
        instance.data["representations"] = list()

        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
            "version"
        ]

        # pass data to version
        version_data = dict()
        version_data.update({k: instance.data[k] for k in transfer_data})

        # add to data of representation
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": ["plate", "lut"],
            "subset": name,
            "fps": instance.context.data["fps"]
        })
        instance.data["versionData"] = version_data

        representation = {
            'files': file,
            'stagingDir': staging_dir,
            'name': "lut" + ext.title(),
            'ext': ext
        }
        instance.data["representations"].append(representation)

        self.log.debug("_ representations: `{}`".format(
            instance.data["representations"]))
        self.log.debug("_ version_data: `{}`".format(
            instance.data["versionData"]))

        with open(os.path.join(staging_dir, file), "w") as outfile:
            outfile.write(json.dumps(effects, indent=4, sort_keys=True))

        return

    def copy_linked_files(self, effect, dst_dir):
        for k, v in effect["node"].items():
            # `k in "file"` would match any substring of "file";
            # compare for equality instead
            if k == "file" and v != '':
                base_name = os.path.basename(v)
                dst = os.path.join(dst_dir, base_name).replace("\\", "/")

                # add it to the json
                effect["node"][k] = dst
                return (v, dst)

    def resource_destination_dir(self, instance):
        anatomy = instance.context.data['anatomy']
        self.create_destination_template(instance, anatomy)

        return os.path.join(
            instance.data["assumedDestination"],
            "resources"
        )

    def create_destination_template(self, instance, anatomy):
        """Create a filepath based on the current data available

        Example template:
            {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
            {subset}.{representation}

        Args:
            instance: the instance to publish

        Returns:
            file path (str)
        """
        # get all the stuff from the database
        subset_name = instance.data["subset"]
        self.log.info(subset_name)
        asset_name = instance.data["asset"]
        project_name = api.Session["AVALON_PROJECT"]
        a_template = anatomy.templates

        project = io.find_one({"type": "project",
                               "name": project_name},
                              projection={"config": True, "data": True})

        template = a_template['publish']['path']
        # anatomy = instance.context.data['anatomy']

        asset = io.find_one({"type": "asset",
                             "name": asset_name,
                             "parent": project["_id"]})

        assert asset, ("No asset found by the name '{}' "
                       "in project '{}'".format(asset_name, project_name))
        silo = asset['silo']

        subset = io.find_one({"type": "subset",
                              "name": subset_name,
                              "parent": asset["_id"]})

        # assume there is no version yet, we start at `1`
        version = None
        version_number = 1
        if subset is not None:
            version = io.find_one({"type": "version",
                                   "parent": subset["_id"]},
                                  sort=[("name", -1)])

        # if there is a subset there ought to be version
        if version is not None:
            version_number += version["name"]

        if instance.data.get('version'):
            version_number = int(instance.data.get('version'))

        padding = int(a_template['render']['padding'])

        hierarchy = asset['data']['parents']
        if hierarchy:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = "/".join(hierarchy)

        template_data = {"root": api.Session["AVALON_PROJECTS"],
                         "project": {"name": project_name,
                                     "code": project['data']['code']},
                         "silo": silo,
                         "family": instance.data['family'],
                         "asset": asset_name,
                         "subset": subset_name,
                         "frame": ('#' * padding),
                         "version": version_number,
                         "hierarchy": hierarchy,
                         "representation": "TEMP"}

        instance.data["assumedTemplateData"] = template_data
        self.log.info(template_data)
        instance.data["template"] = template
        # We take the parent folder of representation 'filepath'
        instance.data["assumedDestination"] = os.path.dirname(
            anatomy.format(template_data)["publish"]["path"]
        )
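
A rough illustration of how `template_data` fills the docstring's example template (plain `str.format` with hypothetical values; the real code resolves it through `anatomy.format`):

# hypothetical fill of the example template from the docstring
template = ("{root}/{project}/{silo}/{asset}/publish/"
            "{subset}/v{version:0>3}/{subset}.{representation}")

print(template.format(
    root="/mnt/projects",
    project="demo",
    silo="film",
    asset="sh010",
    subset="lutPlateMain",
    version=1,
    representation="json"
))
# /mnt/projects/demo/film/sh010/publish/lutPlateMain/v001/lutPlateMain.json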

20 binary image files changed (previews not shown); each was reduced in size, e.g. 199 KiB → 5.9 KiB.