Merge branch 'feature/PYPE-331-nks-lut-workflow_altered' into feature/PYPE-488-nk-loading-nks-lut-soft-effects

This commit is contained in:
Jakub Jezek 2019-08-19 15:42:55 +02:00
commit 80d9aab960
16 changed files with 1189 additions and 318 deletions

View file

@ -23,6 +23,7 @@ from .lib import (
get_asset,
get_project,
get_hierarchy,
get_subsets,
get_version_from_path,
modified_environ,
add_tool_to_environment
@ -53,6 +54,7 @@ __all__ = [
"get_project",
"get_hierarchy",
"get_asset",
"get_subsets",
"get_version_from_path",
"modified_environ",
"add_tool_to_environment",

View file

@ -492,6 +492,72 @@ def filter_pyblish_plugins(plugins):
setattr(plugin, option, value)
# Remove already processed plugins from dictionary
# WARNING Requires plugins with unique names
presets.pop(plugin.__name__)
def get_subsets(asset_name,
regex_filter=None,
version=None,
representations=["exr", "dpx"]):
"""
Query subsets with filter on name.
Return all found subsets together with their selected version and representations. The version can be given as a number; otherwise the latest version is used. Representations can be filtered by name.
Arguments:
asset_name (str): asset (shot) name
regex_filter (raw str): regex pattern to filter subset names
version (int, optional): version number; the latest version is used when not set
representations (list): representation names to include
Returns:
dict: subsets with their version and representation documents
"""
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
Check correct name: `{}`".format(asset_name)
# create subsets query filter
filter_query = {"type": "subset", "parent": asset_io["_id"]}
# add regex filter string into query filter
if regex_filter:
filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
else:
filter_query.update({"name": {"$regex": r'.*'}})
# query all subsets
subsets = [s for s in io.find(filter_query)]
assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
output_dict = {}
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}
filter_repr = {"$or": [{"name": repr} for repr in representations]}
find_dict.update(filter_repr)
repres_out = [i for i in io.find(find_dict)]
if len(repres_out) > 0:
output_dict[subset["name"]] = {"version": version_sel,
"representaions": repres_out}
return output_dict
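A minimal usage sketch for the new helper (not part of the commit; the asset name and regex are illustrative). It is called through pype.api the same way the Nuke lib further down does, and the returned dict keeps the key spelled "representaions", exactly as in the code:

import pype.api as pype

subsets = pype.get_subsets("sh010",
                           regex_filter=r'plate.*',
                           representations=["exr"])
for subset_name, content in subsets.items():
    version_doc = content["version"]            # avalon `version` document
    repres = content["representaions"]          # list of `representation` documents
    print(subset_name, version_doc["name"], [r["name"] for r in repres])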

View file

@ -104,7 +104,7 @@ def install():
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
workfile_settings = lib.WorkfileSettings()
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
@ -121,7 +121,7 @@ def install():
nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
# Set context settings.
nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
menu.install()

View file

@ -1,10 +1,12 @@
import os
import sys
import getpass
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
import avalon.nuke
import pype.api as pype
import nuke
from .templates import (
get_colorspace_preset,
@ -12,6 +14,11 @@ from .templates import (
get_node_colorspace_preset
)
from .templates import (
get_anatomy
)
# TODO: remove get_anatomy and import directly Anatomy() here
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
@ -159,11 +166,6 @@ def format_anatomy(data):
'''
# TODO: perhaps should be nonPublic
from .templates import (
get_anatomy
)
# TODO: remove get_anatomy and import directly Anatomy() here
anatomy = get_anatomy()
log.debug("__ anatomy.templates: {}".format(anatomy.templates))
@ -195,6 +197,7 @@ def script_name():
'''
return nuke.root().knob('name').value()
def add_button_write_to_read(node):
name = "createReadNode"
label = "Create Read"
@ -203,6 +206,7 @@ def add_button_write_to_read(node):
k.setFlag(0x1000)
node.addKnob(k)
def create_write_node(name, data, prenodes=None):
''' Creating write node which is group node
@ -311,7 +315,6 @@ def create_write_node(name, data, prenodes=None):
else:
prev_node = nuke.createNode("Input", "name rgba")
# creating write node
now_node = avalon.nuke.lib.add_write_node("inside_{}".format(name),
**_data
@ -331,7 +334,6 @@ def create_write_node(name, data, prenodes=None):
# imprinting group node
GN = avalon.nuke.imprint(GN, data["avalon"])
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -339,7 +341,7 @@ def create_write_node(name, data, prenodes=None):
# adding write to read button
add_button_write_to_read(GN)
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -347,7 +349,6 @@ def create_write_node(name, data, prenodes=None):
tile_color = _data.get("tile_color", "0xff0000ff")
GN["tile_color"].setValue(tile_color)
# add render button
lnk = nuke.Link_Knob("Render")
lnk.makeLink(write_node.name(), "Render")
@ -378,130 +379,134 @@ def add_rendering_knobs(node):
return node
def set_viewers_colorspace(viewer):
''' Adds correct colorspace to viewer
class WorkfileSettings(object):
"""
Set all root settings for the workfile.
This object applies every possible root setting to the workfile,
including colorspace, frame ranges and resolution format. It can set
them on the Root node or on any given nodes.
Arguments:
viewer (obj): nuke viewer node object to be fixed
root (node): nuke's root node
nodes (list): list of nuke's nodes
nodes_filter (list): filtering classes for nodes
'''
assert isinstance(viewer, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
"""
filter_knobs = [
"viewerProcess",
"wipe_position"
]
viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer']
erased_viewers = []
def __init__(self,
root_node=None,
nodes=None,
**kwargs):
self._project = kwargs.get(
"project") or io.find_one({"type": "project"})
self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
self._asset_entity = pype.get_asset(self._asset)
self._root_node = root_node or nuke.root()
self._nodes = self.get_nodes(nodes=nodes)
for v in viewers:
v['viewerProcess'].setValue(str(viewer["viewerProcess"]))
if str(viewer["viewerProcess"]) not in v['viewerProcess'].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
pprint(copy_knobs)
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
nuke.delete(v)
self.data = kwargs
# create new viewer
nv = nuke.createNode("Viewer")
def get_nodes(self, nodes=None, nodes_filter=None):
# filter out only dictionaries for node creation
#
# print("\n\n")
# pprint(self._nodes)
#
# connect to original inputs
for i, n in enumerate(copy_inputs):
nv.setInput(i, n)
if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
return [n for n in nuke.allNodes()]
elif not isinstance(nodes, list) and isinstance(nodes_filter, list):
nodes = list()
for filter in nodes_filter:
[nodes.append(n) for n in nuke.allNodes(filter=filter)]
return nodes
elif isinstance(nodes, list) and not isinstance(nodes_filter, list):
return [n for n in self._nodes]
elif isinstance(nodes, list) and isinstance(nodes_filter, list):
for filter in nodes_filter:
return [n for n in self._nodes if filter in n.Class()]
# set copied knobs
for k, v in copy_knobs.items():
print(k, v)
nv[k].setValue(v)
def set_viewers_colorspace(self, viewer_dict):
''' Adds correct colorspace to viewer
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer["viewerProcess"]))
Arguments:
viewer_dict (dict): adjustments from presets
if erased_viewers:
log.warning(
"Attention! Viewer nodes {} were erased."
"It had wrong color profile".format(erased_viewers))
'''
assert isinstance(viewer_dict, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
filter_knobs = [
"viewerProcess",
"wipe_position"
]
def set_root_colorspace(root_dict):
''' Adds correct colorspace to root
erased_viewers = []
for v in [n for n in self._nodes
if "Viewer" in n.Class()]:
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
if str(viewer_dict["viewerProcess"]) \
not in v['viewerProcess'].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
Arguments:
root_dict (dict): nuke root node as dictionary
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
nuke.delete(v)
'''
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
# create new viewer
nv = nuke.createNode("Viewer")
# first set OCIO
if nuke.root()["colorManagement"].value() not in str(root_dict["colorManagement"]):
nuke.root()["colorManagement"].setValue(
str(root_dict["colorManagement"]))
# connect to original inputs
for i, n in enumerate(copy_inputs):
nv.setInput(i, n)
# second set ocio version
if nuke.root()["OCIO_config"].value() not in str(root_dict["OCIO_config"]):
nuke.root()["OCIO_config"].setValue(str(root_dict["OCIO_config"]))
# set copied knobs
for k, v in copy_knobs.items():
print(k, v)
nv[k].setValue(v)
# then set the rest
for knob, value in root_dict.items():
if nuke.root()[knob].value() not in value:
nuke.root()[knob].setValue(str(value))
log.debug("nuke.root()['{}'] changed to: {}".format(knob, value))
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
if erased_viewers:
log.warning(
"Attention! Viewer nodes {} were erased."
"It had wrong color profile".format(erased_viewers))
def set_writes_colorspace(write_dict):
''' Adds correct colorspace to write node dict
def set_root_colorspace(self, root_dict):
''' Adds correct colorspace to root
Arguments:
write_dict (dict): nuke write node as dictionary
Arguments:
root_dict (dict): adjustments from presets
'''
# TODO: complete this function so any write node in scene will have fixed colorspace following presets for the project
assert isinstance(write_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
'''
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
# first set OCIO
if self._root_node["colorManagement"].value() \
not in str(root_dict["colorManagement"]):
self._root_node["colorManagement"].setValue(
str(root_dict["colorManagement"]))
# second set ocio version
if self._root_node["OCIO_config"].value() \
not in str(root_dict["OCIO_config"]):
self._root_node["OCIO_config"].setValue(
str(root_dict["OCIO_config"]))
def set_colorspace():
''' Set colorspace following presets
'''
nuke_colorspace = get_colorspace_preset().get("nuke", None)
# then set the rest
for knob, value in root_dict.items():
if self._root_node[knob].value() not in value:
self._root_node[knob].setValue(str(value))
log.debug("nuke.root()['{}'] changed to: {}".format(
knob, value))
try:
set_root_colorspace(nuke_colorspace["root"])
except AttributeError:
log.error(
"set_colorspace(): missing `root` settings in template")
try:
set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
try:
set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
def reset_frame_range_handles():
"""Set frame range to current asset"""
root = nuke.root()
name = api.Session["AVALON_ASSET"]
asset_entity = pype.get_asset(name)
def set_writes_colorspace(self, write_dict):
''' Adds correct colorspace to write node dict
if "data" not in asset_entity:
msg = "Asset {} don't have set any 'data'".format(name)
@ -509,170 +514,221 @@ def reset_frame_range_handles():
nuke.message(msg)
return
data = asset_entity["data"]
Arguments:
write_dict (dict): nuke write node as dictionary
missing_cols = []
check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"]
'''
# TODO: complete this function so any write node in
# scene will have fixed colorspace following presets for the project
assert isinstance(write_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
for col in check_cols:
if col not in data:
missing_cols.append(col)
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
if len(missing_cols) > 0:
missing = ", ".join(missing_cols)
msg = "'{}' are not set for asset '{}'!".format(missing, name)
log.warning(msg)
nuke.message(msg)
return
def set_colorspace(self):
''' Set colorspace following presets
'''
nuke_colorspace = get_colorspace_preset().get("nuke", None)
# get handles values
handle_start = asset_entity["data"]["handleStart"]
handle_end = asset_entity["data"]["handleEnd"]
fps = asset_entity["data"]["fps"]
frame_start = int(asset_entity["data"]["frameStart"]) - handle_start
frame_end = int(asset_entity["data"]["frameEnd"]) + handle_end
root["fps"].setValue(fps)
root["first_frame"].setValue(frame_start)
root["last_frame"].setValue(frame_end)
# setting active viewers
nuke.frame(int(asset_entity["data"]["frameStart"]))
range = '{0}-{1}'.format(
int(asset_entity["data"]["frameStart"]),
int(asset_entity["data"]["frameEnd"]))
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
# adding handle_start/end to root avalon knob
if not avalon.nuke.imprint(root, {
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def reset_resolution():
"""Set resolution to project resolution."""
log.info("Reseting resolution")
project = io.find_one({"type": "project"})
asset = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset, "type": "asset"})
width = asset.get('data', {}).get("resolutionWidth")
height = asset.get('data', {}).get("resolutionHeight")
pixel_aspect = asset.get('data', {}).get("pixelAspect")
if any(not x for x in [width, height, pixel_aspect]):
log.error("Missing set shot attributes in DB. \nContact your supervisor!. \n\nWidth: `{0}` \nHeight: `{1}` \nPixel Asspect: `{2}`".format(
width, height, pixel_aspect))
return
bbox = asset.get('data', {}).get('crop')
if bbox:
try:
x, y, r, t = bbox.split(".")
self.set_root_colorspace(nuke_colorspace["root"])
except AttributeError:
log.error(
"set_colorspace(): missing `root` settings in template")
try:
self.set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
try:
self.set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
def reset_frame_range_handles(self):
"""Set frame range to current asset"""
if "data" not in self._asset_entity:
msg = "Asset {} don't have set any 'data'".format(self._asset)
log.warning(msg)
nuke.message(msg)
return
data = self._asset_entity["data"]
missing_cols = []
check_cols = ["fps", "frameStart", "frameEnd",
"handleStart", "handleEnd"]
for col in check_cols:
if col not in data:
missing_cols.append(col)
if len(missing_cols) > 0:
missing = ", ".join(missing_cols)
msg = "'{}' are not set for asset '{}'!".format(
missing, self._asset)
log.warning(msg)
nuke.message(msg)
return
# get handles values
handle_start = data["handleStart"]
handle_end = data["handleEnd"]
fps = data["fps"]
frame_start = int(data["frameStart"]) - handle_start
frame_end = int(data["frameEnd"]) + handle_end
self._root_node["fps"].setValue(fps)
self._root_node["first_frame"].setValue(frame_start)
self._root_node["last_frame"].setValue(frame_end)
# setting active viewers
try:
nuke.frame(int(data["frameStart"]))
except Exception as e:
bbox = None
log.error("{}: {} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default".format(__name__, e))
log.warning("no viewer in scene: `{}`".format(e))
used_formats = list()
for f in nuke.formats():
if project["name"] in str(f.name()):
used_formats.append(f)
else:
format_name = project["name"] + "_1"
range = '{0}-{1}'.format(
int(data["frameStart"]),
int(data["frameEnd"]))
crnt_fmt_str = ""
if used_formats:
check_format = used_formats[-1]
format_name = "{}_{}".format(
project["name"],
int(used_formats[-1].name()[-1]) + 1
)
log.info(
"Format exists: {}. "
"Will create new: {}...".format(
used_formats[-1].name(),
format_name)
)
crnt_fmt_kargs = {
"width": (check_format.width()),
"height": (check_format.height()),
"pixelAspect": float(check_format.pixelAspect())
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
# adding handle_start/end to root avalon knob
if not avalon.nuke.imprint(self._root_node, {
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def reset_resolution(self):
"""Set resolution to project resolution."""
log.info("Reseting resolution")
width = self._asset_entity.get('data', {}).get("resolutionWidth")
height = self._asset_entity.get('data', {}).get("resolutionHeight")
pixel_aspect = self._asset_entity.get('data', {}).get("pixelAspect")
if any(not x for x in [width, height, pixel_aspect]):
log.error("Missing set shot attributes in DB. \nContact"
"your supervisor!. \n\nWidth: `{0}` \nHeight: `{1}`"
"\nPixel Asspect: `{2}`".format(
width, height, pixel_aspect))
return
bbox = self._asset_entity.get('data', {}).get('crop')
if bbox:
try:
x, y, r, t = bbox.split(".")
except Exception as e:
bbox = None
log.error("{}: {} \nFormat:Crop need to be set with dots,"
" example: 0.0.1920.1080, /nSetting to"
" default".format(__name__, e))
used_formats = list()
for f in nuke.formats():
if self._project["name"] in str(f.name()):
used_formats.append(f)
else:
format_name = self._project["name"] + "_1"
crnt_fmt_str = ""
if used_formats:
check_format = used_formats[-1]
format_name = "{}_{}".format(
self._project["name"],
int(used_formats[-1].name()[-1]) + 1
)
log.info(
"Format exists: {}. "
"Will create new: {}...".format(
used_formats[-1].name(),
format_name)
)
crnt_fmt_kargs = {
"width": (check_format.width()),
"height": (check_format.height()),
"pixelAspect": float(check_format.pixelAspect())
}
if bbox:
crnt_fmt_kargs.update({
"x": int(check_format.x()),
"y": int(check_format.y()),
"r": int(check_format.r()),
"t": int(check_format.t()),
})
crnt_fmt_str = self.make_format_string(**crnt_fmt_kargs)
new_fmt_kargs = {
"width": int(width),
"height": int(height),
"pixelAspect": float(pixel_aspect),
"project_name": format_name
}
if bbox:
crnt_fmt_kargs.update({
"x": int(check_format.x()),
"y": int(check_format.y()),
"r": int(check_format.r()),
"t": int(check_format.t()),
new_fmt_kargs.update({
"x": int(x),
"y": int(y),
"r": int(r),
"t": int(t),
})
crnt_fmt_str = make_format_string(**crnt_fmt_kargs)
new_fmt_kargs = {
"width": int(width),
"height": int(height),
"pixelAspect": float(pixel_aspect),
"project_name": format_name
}
if bbox:
new_fmt_kargs.update({
"x": int(x),
"y": int(y),
"r": int(r),
"t": int(t),
})
new_fmt_str = self.make_format_string(**new_fmt_kargs)
new_fmt_str = make_format_string(**new_fmt_kargs)
if new_fmt_str not in crnt_fmt_str:
self.make_format(frm_str=new_fmt_str,
project_name=new_fmt_kargs["project_name"])
if new_fmt_str not in crnt_fmt_str:
make_format(frm_str=new_fmt_str,
project_name=new_fmt_kargs["project_name"])
log.info("Format is set")
log.info("Format is set")
def make_format_string(self, **args):
if args.get("r"):
return (
"{width} "
"{height} "
"{x} "
"{y} "
"{r} "
"{t} "
"{pixelAspect:.2f}".format(**args)
)
else:
return (
"{width} "
"{height} "
"{pixelAspect:.2f}".format(**args)
)
def make_format(self, **args):
log.info("Format does't exist, will create: \n{}".format(args))
nuke.addFormat("{frm_str} "
"{project_name}".format(**args))
self._root_node["format"].setValue("{project_name}".format(**args))
def make_format_string(**args):
if args.get("r"):
return (
"{width} "
"{height} "
"{x} "
"{y} "
"{r} "
"{t} "
"{pixelAspect:.2f}".format(**args)
)
else:
return (
"{width} "
"{height} "
"{pixelAspect:.2f}".format(**args)
)
def make_format(**args):
log.info("Format does't exist, will create: \n{}".format(args))
nuke.addFormat("{frm_str} "
"{project_name}".format(**args))
nuke.root()["format"].setValue("{project_name}".format(**args))
def set_context_settings():
# replace reset resolution from avalon core to pype's
reset_resolution()
# replace reset resolution from avalon core to pype's
reset_frame_range_handles()
# add colorspace menu item
set_colorspace()
def set_context_settings(self):
# replace reset resolution from avalon core to pype's
self.reset_resolution()
# replace reset frame range from avalon core to pype's
self.reset_frame_range_handles()
# set colorspace following presets
self.set_colorspace()
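A compact sketch of how the class is wired up elsewhere in this commit (see the nuke __init__.py and menu.py hunks below); an active Avalon session inside Nuke is assumed:

from pype.nuke import lib

workfile_settings = lib.WorkfileSettings()      # defaults to nuke.root() and all nodes
workfile_settings.reset_resolution()            # asset resolution -> root format
workfile_settings.reset_frame_range_handles()   # frame range + handles -> root and viewers
workfile_settings.set_colorspace()              # root/viewer/write colorspace from presets
# or everything at once, as registered on Root node creation:
workfile_settings.set_context_settings()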
def get_hierarchical_attr(entity, attr, default=None):
@ -730,3 +786,309 @@ def get_write_node_template_attr(node):
# fix badly encoded data
return avalon.nuke.lib.fix_data_for_node_create(correct_data)
class BuildWorkfile(WorkfileSettings):
"""
Build the first version of a workfile.
Settings are taken from presets and the database; all subsets are loaded at their latest version for the defined representations.
Arguments:
variable (type): description
"""
xpos = 0
ypos = 0
xpos_size = 80
ypos_size = 90
xpos_gap = 50
ypos_gap = 50
pos_layer = 10
def __init__(self,
root_path=None,
root_node=None,
nodes=None,
to_script=None,
**kwargs):
"""
A short description.
A bit longer description.
Arguments:
root_path (str): description
root_node (nuke.Node): description
nodes (list): list of nuke.Node
nodes_effects (dict): dictionary with subsets
Example:
nodes_effects = {
"plateMain": {
"nodes": [
[("Class", "Reformat"),
("resize", "distort"),
("flip", True)],
[("Class", "Grade"),
("blackpoint", 0.5),
("multiply", 0.4)]
]
},
}
"""
WorkfileSettings.__init__(self,
root_node=root_node,
nodes=nodes,
**kwargs)
self.to_script = to_script
# collect data for formatting
data = {
"root": root_path or api.Session["AVALON_PROJECTS"],
"project": {"name": self._project["name"],
"code": self._project["data"].get("code", '')},
"asset": self._asset or os.environ["AVALON_ASSET"],
"task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
"hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
"version": kwargs.get("version", {}).get("name", 1),
"user": getpass.getuser(),
"comment": "firstBuild"
}
# get presets from anatomy
anatomy = get_anatomy()
# format anatomy
anatomy_filled = anatomy.format(data)
# get dir and file for workfile
self.work_dir = anatomy_filled["avalon"]["work"]
self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk"
def save_script_as(self, path=None):
# first clear anything in open window
nuke.scriptClear()
if not path:
dir = self.work_dir
path = os.path.join(
self.work_dir,
self.work_file).replace("\\", "/")
else:
dir = os.path.dirname(path)
# check if folder is created
if not os.path.exists(dir):
os.makedirs(dir)
# save script to path
nuke.scriptSaveAs(path)
def process(self,
regex_filter=None,
version=None,
representations=["exr", "dpx"]):
"""
A short description.
A bit longer description.
Args:
regex_filter (raw string): regex pattern to filter out subsets
version (int): define a particular version, None gets last
representations (list):
Returns:
type: description
Raises:
Exception: description
"""
if not self.to_script:
# save the script
self.save_script_as()
# create viewer and reset frame range
viewer = self.get_nodes(nodes_filter=["Viewer"])
if not viewer:
vn = nuke.createNode("Viewer")
vn["xpos"].setValue(self.xpos)
vn["ypos"].setValue(self.ypos)
else:
vn = viewer[-1]
# move position
self.position_up()
wn = self.write_create()
wn["xpos"].setValue(self.xpos)
wn["ypos"].setValue(self.ypos)
wn["render"].setValue(True)
vn.setInput(0, wn)
bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
color='0xcc1102ff', layer=-1,
nodes=[wn])
# move position
self.position_up(2)
# set frame range for new viewer
self.reset_frame_range_handles()
# get all available representations
subsets = pype.get_subsets(self._asset,
regex_filter=regex_filter,
version=version,
representations=representations)
nodes_backdrop = list()
for name, subset in subsets.items():
log.info("Building Loader to: `{}`".format(name))
version = subset["version"]
log.info("Version to: `{}`".format(version["name"]))
representations = subset["representaions"]
for repr in representations:
rn = self.read_loader(repr)
rn["xpos"].setValue(self.xpos)
rn["ypos"].setValue(self.ypos)
wn.setInput(0, rn)
# get additional nodes
# # TODO: link it to nut Create and Load
print("\n\n__________ nodes __________")
# # create all additional nodes
# for n in nodes:
# print(n)
# # create nodes
# klass, value = n[0]
# node = nuke.createNode(value)
# print(node.name())
#
# for k, v in n:
# if "Class" not in k:
# node[k].setValue(v)
# self._nodes.append(node)
# move position
self.position_right()
nodes_backdrop.append(rn)
bdn = self.create_backdrop(label="Loaded Reads",
color='0x2d7702ff', layer=-1,
nodes=nodes_backdrop)
def read_loader(self, representation):
"""
Gets Loader plugin for image sequence or mov
Arguments:
representation (dict): avalon db entity
"""
context = representation["context"]
read_name = "Read_{0}_{1}_{2}".format(context["asset"],
context["subset"],
context["representation"])
loader_name = "LoadSequence"
if "mov" in context["representation"]:
loader_name = "LoadMov"
loader_plugin = None
for Loader in api.discover(api.Loader):
if Loader.__name__ != loader_name:
continue
loader_plugin = Loader
return api.load(Loader=loader_plugin,
representation=representation["_id"])
def write_create(self):
"""
Create render write
Arguments:
representation (dict): avalon db entity
"""
Create_name = "CreateWriteRender"
creator_plugin = None
for Creator in api.discover(api.Creator):
if Creator.__name__ != Create_name:
continue
creator_plugin = Creator
# return api.create()
return creator_plugin("render_writeMain", self._asset).process()
def create_backdrop(self, label="", color=None, layer=0,
nodes=None):
"""
Create Backdrop node
Arguments:
color (str): nuke compatible string with color code
layer (int): layer of node usually used (self.pos_layer - 1)
label (str): the message
nodes (list): list of nodes to be wrapped into backdrop
"""
assert isinstance(nodes, list), "`nodes` should be a list of nodes"
# Calculate bounds for the backdrop node.
bdX = min([node.xpos() for node in nodes])
bdY = min([node.ypos() for node in nodes])
bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX
bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY
# Expand the bounds to leave a little border. Elements are offsets
# for left, top, right and bottom edges respectively
left, top, right, bottom = (-20, -65, 20, 60)
bdX += left
bdY += top
bdW += (right - left)
bdH += (bottom - top)
bdn = nuke.createNode("BackdropNode")
bdn["z_order"].setValue(self.pos_layer + layer)
if color:
bdn["tile_color"].setValue(int(color, 16))
bdn["xpos"].setValue(bdX)
bdn["ypos"].setValue(bdY)
bdn["bdwidth"].setValue(bdW)
bdn["bdheight"].setValue(bdH)
if label:
bdn["label"].setValue(label)
bdn["note_font_size"].setValue(20)
return bdn
def position_reset(self, xpos=0, ypos=0):
self.xpos = xpos
self.ypos = ypos
def position_right(self, multiply=1):
self.xpos += (self.xpos_size * multiply) + self.xpos_gap
def position_left(self, multiply=1):
self.xpos -= (self.xpos_size * multiply) + self.xpos_gap
def position_down(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
def position_up(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
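A hedged sketch of driving the builder directly; the menu item added below simply calls lib.BuildWorkfile().process with defaults, and the regex here is illustrative only:

from pype.nuke import lib

builder = lib.BuildWorkfile()          # work dir/file resolved from anatomy + Avalon session
builder.process(regex_filter=r'plate.*',
                version=None,          # None -> latest version of each subset
                representations=["exr", "dpx"])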

View file

@ -9,7 +9,7 @@ log = Logger().get_logger(__name__, "nuke")
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
workfile_settings = lib.WorkfileSettings()
# replace reset resolution from avalon core to pype's
name = "Reset Resolution"
new_name = "Set Resolution"
@ -20,7 +20,7 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lib.reset_resolution, index=(rm_item[0]))
menu.addCommand(new_name, workfile_settings.reset_resolution, index=(rm_item[0]))
# replace reset frame range from avalon core to pype's
name = "Reset Frame Range"
@ -31,20 +31,28 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lib.reset_frame_range_handles, index=(rm_item[0]))
menu.addCommand(new_name, workfile_settings.reset_frame_range_handles, index=(rm_item[0]))
# add colorspace menu item
name = "Set colorspace"
menu.addCommand(
name, lib.set_colorspace,
name, workfile_settings.set_colorspace,
index=(rm_item[0]+2)
)
log.debug("Adding menu item: {}".format(name))
# add workfile builder menu item
name = "Build First Workfile.."
menu.addCommand(
name, lib.BuildWorkfile().process,
index=(rm_item[0]+7)
)
log.debug("Adding menu item: {}".format(name))
# add item that applies all setting above
name = "Apply all settings"
menu.addCommand(
name, lib.set_context_settings, index=(rm_item[0]+3)
name, workfile_settings.set_context_settings, index=(rm_item[0]+3)
)
log.debug("Adding menu item: {}".format(name))

View file

@ -189,7 +189,7 @@ def add_submission():
class PublishAction(QtWidgets.QAction):
"""
Action which is shown as a menu item
Action which is shown as a menu item
"""
def __init__(self):
@ -287,3 +287,59 @@ def _show_no_gui():
messagebox.setStandardButtons(messagebox.Ok)
messagebox.exec_()
def CreateNukeWorkfile(nodes=None,
nodes_effects=None,
to_timeline=False,
**kwargs):
''' Create a Nuke workfile of a particular version from the given nodes.
It also creates timeline track items as precomps.
Arguments:
nodes (list of dict): each key in the dict is a knob; order is important
to_timeline (bool): will build a trackItem with metadata
Returns:
bool: True if done
Raises:
Exception: with traceback
'''
import hiero.core
from avalon.nuke import imprint
from pype.nuke import (
lib as nklib
)
# check if the file exists if does then Raise "File exists!"
if os.path.exists(filepath):
raise FileExistsError("File already exists: `{}`".format(filepath))
# if no representations matching then
# Raise "no representations to be build"
if len(representations) == 0:
raise AttributeError("Missing list of `representations`")
# check nodes input
if len(nodes) == 0:
log.warning("Missing list of `nodes`")
# create temp nk file
nuke_script = hiero.core.nuke.ScriptWriter()
# create root node and save all metadata
root_node = hiero.core.nuke.RootNode()
root_path = os.environ["AVALON_PROJECTS"]
nuke_script.addNode(root_node)
# here to call pype.nuke.lib.BuildWorkfile
script_builder = nklib.BuildWorkfile(
root_node=root_node,
root_path=root_path,
nodes=nuke_script.getNodes(),
**kwargs
)

View file

@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"audio"
]
exclude_families = ["clip"]

View file

@ -69,9 +69,7 @@ class CreateWriteRender(avalon.nuke.Creator):
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
create_write_node(self.data["subset"], write_data)
return
return create_write_node(self.data["subset"], write_data)
class CreateWritePrerender(avalon.nuke.Creator):

View file

@ -107,7 +107,11 @@ class LoadSequence(api.Loader):
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
read_name = "Read_" + context["representation"]["context"]["subset"]
repr_cont = context["representation"]["context"]
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@ -227,7 +231,7 @@ class LoadSequence(api.Loader):
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)

View file

@ -20,7 +20,33 @@ class CollectClips(api.ContextPlugin):
projectdata = context.data["projectData"]
version = context.data.get("version", "001")
sequence = context.data.get("activeSequence")
instances_data = []
# get all subTrackItems and add it to context
effects_on_tracks = []
sub_track_items = []
# loop through all tracks and search for subtracks
for track_index, video_track in enumerate(sequence.videoTracks()):
sub_items = video_track.subTrackItems()
if not sub_items:
continue
for si in sub_items:
selected_track = [(indx, vt) for indx, vt in enumerate(sequence.videoTracks())
if vt.name() in si[0].parentTrack().name()]
# if the filtered track index is the same as the
# actual track index, there is a match
if (selected_track[0][0] == track_index):
sub_track_items += si
if (track_index not in effects_on_tracks):
effects_on_tracks.append(track_index)
# add it to context
context.data["subTrackUsedTracks"] = effects_on_tracks
context.data["subTrackItems"] = sub_track_items
for item in context.data.get("selection", []):
# Skip audio track items
# Try/Except is to handle items types, like EffectTrackItem
@ -28,7 +54,7 @@ class CollectClips(api.ContextPlugin):
media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
if str(item.mediaType()) != media_type:
continue
except:
except Exception:
continue
track = item.parent()
@ -65,7 +91,7 @@ class CollectClips(api.ContextPlugin):
try:
head, padding, ext = os.path.basename(source_path).split(".")
source_first_frame = int(padding)
except:
except Exception:
source_first_frame = 0
instances_data.append(

View file

@ -0,0 +1,109 @@
import pyblish.api
import hiero.core
class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
"""Collect video tracks effects into context."""
order = pyblish.api.CollectorOrder + 0.1015
label = "Collect Soft Lut Effects"
families = ["clip"]
def process(self, instance):
self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
# taking active sequence
subset = instance.data["subset"]
sequence = instance.context.data['activeSequence']
effects_on_tracks = instance.context.data.get("subTrackUsedTracks")
sub_track_items = instance.context.data.get("subTrackItems")
track = instance.data["track"]
timeline_in_h = instance.data["clipInH"]
timeline_out_h = instance.data["clipOutH"]
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
# knob keys to be ignored
_ignoring_keys = ['invert_mask', 'help', 'mask',
'xpos', 'ypos', 'layer', 'process_mask', 'channel',
'channels', 'maskChannelMask', 'maskChannelInput',
'note_font', 'note_font_size', 'unpremult',
'postage_stamp_frame', 'maskChannel', 'export_cc',
'select_cccid', 'mix', 'version']
# creating context attribute
effects = {"assignTo": subset, "effects": dict()}
for subtrack_item in sub_track_items:
sub_track = subtrack_item.parentTrack().name()
# ignore anything not EffectTrackItem
if not isinstance(subtrack_item, hiero.core.EffectTrackItem):
continue
et_item = subtrack_item
# ignore item if not enabled
if not et_item.isEnabled():
continue
node = et_item.node()
node_serialized = {}
# loop through all knobs and collect those not ignored
# and any that hold a value
for knob in node.knobs().keys():
# skip nodes in ignore keys
if knob in _ignoring_keys:
continue
# get animation if node is animated
if node[knob].isAnimated():
# grab animation including handles
knob_anim = [node[knob].getValueAt(i)
for i in range(timeline_in_h, timeline_out_h + 1)]
node_serialized[knob] = knob_anim
else:
node_serialized[knob] = node[knob].value()
# pick track index from subTrackItem
pick_sub_track = [indx for indx, vt
in enumerate(sequence.videoTracks())
if vt.name() in sub_track]
# pick track index from trackItem
pick_track = [indx for indx, vt in enumerate(sequence.videoTracks())
if vt.name() in track]
# collect timelineIn/Out
effect_t_in = int(et_item.timelineIn())
effect_t_out = int(et_item.timelineOut())
# check if the parent track has video trackItems
items_check = et_item.parent().items()
# filter out all track items under any track with effects
# also filter out the track item below
if (pick_track[0] in effects_on_tracks) and (pick_sub_track[0] >= pick_track[0]):
if (effect_t_in == timeline_in) and (effect_t_out == timeline_out):
effects["effects"].update({et_item.name(): {
"timelineIn": effect_t_in,
"timelineOut": effect_t_out,
"subTrackIndex": et_item.subTrackIndex(),
"trackIndex": pick_track[0],
"node": node_serialized
}})
# for subTrackItem on track without any trackItems
elif len(items_check) == 0:
effects["effects"].update({et_item.name(): {
"timelineIn": effect_t_in,
"timelineOut": effect_t_out,
"subTrackIndex": et_item.subTrackIndex(),
"trackIndex": pick_track[0],
"node": node_serialized
}})
instance.data["effectTrackItems"] = effects
if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
instance.data["families"] += ["lut"]
self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))

View file

@ -38,6 +38,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
sequence = context.data['activeSequence']
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# build data for inner nukestudio project property
data = {
@ -157,6 +161,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"width": width,
"height": height,
"pixelAspect": pixel_aspect,
"tasks": instance.data["tasks"]
})
@ -191,7 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
def process(self, context):
instances = context[:]
sequence = context.data['activeSequence']
# create hierarchyContext attr if context has none
temp_context = {}
@ -216,6 +223,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["width"] = s_asset_data["width"]
instance.data["height"] = s_asset_data["height"]
instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
@ -265,16 +275,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding SourceResolution if Tag was present
if instance.data.get("main"):
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
width, height, pixel_aspect))
in_info['custom_attributes'].update({
"resolutionWidth": width,
"resolutionHeight": height,
"pixelAspect": pixel_aspect
"resolutionWidth": instance.data["width"],
"resolutionHeight": instance.data["height"],
"pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']

View file

@ -66,11 +66,14 @@ class CollectPlates(api.InstancePlugin):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
self.log.info("Source Width and Height are: `{0} x {1}`".format(
width, height))
pixel_aspect = int(item.source().mediaSource().pixelAspect())
self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
width, height, pixel_aspect))
data.update({
"width": width,
"height": height
"height": height,
"pixelAspect": pixel_aspect
})
self.log.debug("Creating instance with name: {}".format(data["name"]))
@ -123,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
"clipInH", "clipOutH", "asset", "track", "version"
"clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
]
# pass data to version
@ -133,6 +136,7 @@ class CollectPlatesData(api.InstancePlugin):
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
"subset": name,
"fps": instance.context.data["fps"]

View file

@ -1,7 +1,7 @@
from pyblish import api
class CollectShots(api.ContextPlugin):
class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
@ -10,39 +10,39 @@ class CollectShots(api.ContextPlugin):
hosts = ["nukestudio"]
families = ["clip"]
def process(self, context):
for instance in context[:]:
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
if tag["name"].lower() == "hierarchy":
tagged = True
def process(self, instance):
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance))
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
if tag["name"].lower() == "hierarchy":
tagged = True
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance)
)
continue
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance)
)
return
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("_ context: {}".format(context[:]))
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)

View file

@ -0,0 +1,231 @@
# from pype import plugins
import os
import json
import re
import pyblish.api
import tempfile
from avalon import io, api
class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
"""Collect video tracks effects into context."""
order = pyblish.api.ExtractorOrder
label = "Export Soft Lut Effects"
families = ["lut"]
def process(self, instance):
item = instance.data["item"]
effects = instance.data.get("effectTrackItems")
instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
# skip any without effects
if not effects:
return
subset = instance.data.get("subset")
subset_split = re.findall(r'[A-Z][^A-Z]*', subset)
if len(subset_split) > 0:
root_name = subset.replace(subset_split[0], "")
subset_split.insert(0, root_name.capitalize())
subset_split.insert(0, "lut")
self.log.debug("creating staging dir")
# staging_dir = self.staging_dir(instance)
# TODO: only provisional, will be replaced by a function
staging_dir = instance.data.get('stagingDir', None)
if not staging_dir:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
self.log.debug("creating staging dir: `{}`".format(staging_dir))
transfers = list()
if "transfers" not in instance.data:
instance.data["transfers"] = list()
name = "".join(subset_split)
ext = "json"
file = name + "." + ext
# create new instance and inherit data
data = {}
for key, value in instance.data.iteritems():
data[key] = value
# change names
data["subset"] = name
data["family"] = "lut"
data["families"] = []
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(file)[1]
)
data["source"] = data["sourcePath"]
# create new instance
instance = instance.context.create_instance(**data)
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
for k, effect in effects["effects"].items():
trn = self.copy_linked_files(effect, dst_dir)
if trn:
transfers.append((trn[0], trn[1]))
instance.data["transfers"].extend(transfers)
self.log.debug("_ transfers: `{}`".format(
instance.data["transfers"]))
# create representations
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
]
# pass data to version
version_data = dict()
version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": ["plate", "lut"],
"subset": name,
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data
representation = {
'files': file,
'stagingDir': staging_dir,
'name': "lut" + ext.title(),
'ext': ext
}
instance.data["representations"].append(representation)
self.log.debug("_ representations: `{}`".format(
instance.data["representations"]))
self.log.debug("_ version_data: `{}`".format(
instance.data["versionData"]))
with open(os.path.join(staging_dir, file), "w") as outfile:
outfile.write(json.dumps(effects, indent=4, sort_keys=True))
return
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v is not '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")
# add it to the json
effect["node"][k] = dst
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset['silo']
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)