Merged in feature/PYPE-331-nks-lut-workflow (pull request #165)

Feature/PYPE-331 nks lut workflow
This commit is contained in:
Jakub Jezek 2019-08-30 13:52:13 +00:00 committed by Jakub Ježek
commit ef6f5c8522
42 changed files with 1944 additions and 392 deletions

View file

@ -23,6 +23,7 @@ from .lib import (
get_asset,
get_project,
get_hierarchy,
get_subsets,
get_version_from_path,
modified_environ,
add_tool_to_environment
@ -53,6 +54,7 @@ __all__ = [
"get_project",
"get_hierarchy",
"get_asset",
"get_subsets",
"get_version_from_path",
"modified_environ",
"add_tool_to_environment",

View file

@ -492,6 +492,72 @@ def filter_pyblish_plugins(plugins):
setattr(plugin, option, value)
# Remove already processed plugins from dictionary
# WARNING Requires plugins with unique names
presets.pop(plugin.__name__)
def get_subsets(asset_name,
regex_filter=None,
version=None,
representations=["exr", "dpx"]):
"""
Query subsets, optionally filtered by name.
Returns all found subsets together with the selected version and its representations. The version defaults to the latest, but a particular number can be given; representations can be filtered by name.
Arguments:
asset_name (str): asset (shot) name
regex_filter (raw str): raw string with a filter pattern
version (int): version number; None selects the latest
representations (list): representation names to match
Returns:
dict: subsets, each with "version" and "representaions" keys
"""
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
Check correct name: `{}`".format(asset_name)
# create subsets query filter
filter_query = {"type": "subset", "parent": asset_io["_id"]}
# add regex filter string into the query filter
if regex_filter:
filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
else:
filter_query.update({"name": {"$regex": r'.*'}})
# query all subsets
subsets = list(io.find(filter_query))
assert subsets, "No subsets found for asset `{}`. Check the filter; `r'.*'` matches everything".format(asset_name)
output_dict = {}
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}
filter_repr = {"$or": [{"name": repr} for repr in representations]}
find_dict.update(filter_repr)
repres_out = list(io.find(find_dict))
if repres_out:
output_dict[subset["name"]] = {"version": version_sel,
"representaions": repres_out}
return output_dict
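A minimal usage sketch (the asset name and filter are hypothetical; assumes an active Avalon session):

import pype.api as pype
subsets = pype.get_subsets("sh010",
                           regex_filter=r"plate.*",
                           representations=["exr", "dpx"])
for name, data in subsets.items():
    # note: the key is spelled "representaions" in the code above
    print(name, data["version"]["name"], len(data["representaions"]))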

View file

@ -104,7 +104,7 @@ def install():
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
workfile_settings = lib.WorkfileSettings()
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
@ -121,7 +121,7 @@ def install():
nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
# Set context settings.
nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
menu.install()

View file

@ -1,10 +1,12 @@
import os
import sys
import getpass
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
import avalon.nuke
import pype.api as pype
import nuke
from .templates import (
get_colorspace_preset,
@ -12,6 +14,11 @@ from .templates import (
get_node_colorspace_preset
)
from .templates import (
get_anatomy
)
# TODO: remove get_anatomy and import directly Anatomy() here
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
@ -159,11 +166,6 @@ def format_anatomy(data):
'''
# TODO: perhaps should be nonPublic
from .templates import (
get_anatomy
)
# TODO: remove get_anatomy and import directly Anatomy() here
anatomy = get_anatomy()
log.debug("__ anatomy.templates: {}".format(anatomy.templates))
@ -195,6 +197,7 @@ def script_name():
'''
return nuke.root().knob('name').value()
def add_button_write_to_read(node):
name = "createReadNode"
label = "Create Read"
@ -203,6 +206,7 @@ def add_button_write_to_read(node):
k.setFlag(0x1000)
node.addKnob(k)
def create_write_node(name, data, prenodes=None):
''' Creating write node which is group node
@ -311,7 +315,6 @@ def create_write_node(name, data, prenodes=None):
else:
prev_node = nuke.createNode("Input", "name rgba")
# creating write node
now_node = avalon.nuke.lib.add_write_node("inside_{}".format(name),
**_data
@ -331,7 +334,6 @@ def create_write_node(name, data, prenodes=None):
# imprinting group node
GN = avalon.nuke.imprint(GN, data["avalon"])
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -339,7 +341,7 @@ def create_write_node(name, data, prenodes=None):
# adding write to read button
add_button_write_to_read(GN)
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -385,130 +387,180 @@ def add_deadline_tab(node):
node.addKnob(knob)
def set_viewers_colorspace(viewer):
''' Adds correct colorspace to viewer
def create_backdrop(label="", color=None, layer=0,
nodes=None):
"""
Create Backdrop node
Arguments:
viewer (obj): nuke viewer node object to be fixed
color (str): nuke compatible string with color code
layer (int): layer of node usually used (self.pos_layer - 1)
label (str): the message
nodes (list): list of nodes to be wrapped into backdrop
'''
assert isinstance(viewer, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
"""
assert isinstance(nodes, list), "`nodes` should be a list of nodes"
filter_knobs = [
"viewerProcess",
"wipe_position"
]
viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer']
erased_viewers = []
# Calculate bounds for the backdrop node.
bdX = min([node.xpos() for node in nodes])
bdY = min([node.ypos() for node in nodes])
bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX
bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY
for v in viewers:
v['viewerProcess'].setValue(str(viewer["viewerProcess"]))
if str(viewer["viewerProcess"]) not in v['viewerProcess'].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
pprint(copy_knobs)
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
nuke.delete(v)
# Expand the bounds to leave a little border. Elements are offsets
# for left, top, right and bottom edges respectively
left, top, right, bottom = (-20, -65, 20, 60)
bdX += left
bdY += top
bdW += (right - left)
bdH += (bottom - top)
# create new viewer
nv = nuke.createNode("Viewer")
bdn = nuke.createNode("BackdropNode")
bdn["z_order"].setValue(layer)
# connect to original inputs
for i, n in enumerate(copy_inputs):
nv.setInput(i, n)
if color:
bdn["tile_color"].setValue(int(color, 16))
# set copied knobs
for k, v in copy_knobs.items():
print(k, v)
nv[k].setValue(v)
bdn["xpos"].setValue(bdX)
bdn["ypos"].setValue(bdY)
bdn["bdwidth"].setValue(bdW)
bdn["bdheight"].setValue(bdH)
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer["viewerProcess"]))
if label:
bdn["label"].setValue(label)
if erased_viewers:
log.warning(
"Attention! Viewer nodes {} were erased."
"It had wrong color profile".format(erased_viewers))
bdn["note_font_size"].setValue(20)
return bdn
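A hedged usage sketch inside a Nuke session (the wrapped nodes are hypothetical):

import nuke
from pype.nuke.lib import create_backdrop
read = nuke.createNode("Read")
grade = nuke.createNode("Grade")
# wrap both nodes into a labelled, coloured backdrop one layer down
create_backdrop(label="plate grade", color="0x7171C6ff",
                layer=-1, nodes=[read, grade])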
def set_root_colorspace(root_dict):
''' Adds correct colorspace to root
class WorkfileSettings(object):
"""
All settings for the workfile are set here.
This object applies all available root settings to the workfile,
including colorspace, frame ranges and resolution format. It can set
them on the Root node or on any given node.
Arguments:
root_dict (dict): nuke root node as dictionary
root (node): nuke's root node
nodes (list): list of nuke's nodes
nodes_filter (list): filtering classes for nodes
'''
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
"""
# first set OCIO
if nuke.root()["colorManagement"].value() not in str(root_dict["colorManagement"]):
nuke.root()["colorManagement"].setValue(
str(root_dict["colorManagement"]))
def __init__(self,
root_node=None,
nodes=None,
**kwargs):
self._project = kwargs.get(
"project") or io.find_one({"type": "project"})
self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
self._asset_entity = pype.get_asset(self._asset)
self._root_node = root_node or nuke.root()
self._nodes = self.get_nodes(nodes=nodes)
# second set ocio version
if nuke.root()["OCIO_config"].value() not in str(root_dict["OCIO_config"]):
nuke.root()["OCIO_config"].setValue(str(root_dict["OCIO_config"]))
self.data = kwargs
# then set the rest
for knob, value in root_dict.items():
if nuke.root()[knob].value() not in value:
nuke.root()[knob].setValue(str(value))
log.debug("nuke.root()['{}'] changed to: {}".format(knob, value))
def get_nodes(self, nodes=None, nodes_filter=None):
# return nodes, optionally filtered by class name
#
# print("\n\n")
# pprint(self._nodes)
#
if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
return [n for n in nuke.allNodes()]
elif not isinstance(nodes, list) and isinstance(nodes_filter, list):
nodes = list()
for node_filter in nodes_filter:
nodes.extend(nuke.allNodes(filter=node_filter))
return nodes
elif isinstance(nodes, list) and not isinstance(nodes_filter, list):
return [n for n in self._nodes]
elif isinstance(nodes, list) and isinstance(nodes_filter, list):
filtered = list()
for node_filter in nodes_filter:
filtered.extend(
[n for n in self._nodes if node_filter in n.Class()])
return filtered
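A sketch of the four branches (assumes an active Avalon session so the constructor can resolve project and asset):

ws = WorkfileSettings()
everything = ws.get_nodes()                      # all nodes in the script
viewers = ws.get_nodes(nodes_filter=["Viewer"])  # fresh query per class name
writes = ws.get_nodes(nodes=everything,
                      nodes_filter=["Write"])    # filters the cached self._nodes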
def set_writes_colorspace(write_dict):
''' Adds correct colorspace to write node dict
def set_viewers_colorspace(self, viewer_dict):
''' Adds correct colorspace to viewer
Arguments:
write_dict (dict): nuke write node as dictionary
Arguments:
viewer_dict (dict): adjustments from presets
'''
# TODO: complete this function so any write node in scene will have fixed colorspace following presets for the project
assert isinstance(write_dict, dict), log.error(
"set_writes_colorspace(): argument should be dictionary")
'''
assert isinstance(viewer_dict, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
filter_knobs = [
"viewerProcess",
"wipe_position"
]
erased_viewers = []
for v in [n for n in self._nodes
if "Viewer" in n.Class()]:
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
if str(viewer_dict["viewerProcess"]) \
not in v['viewerProcess'].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
def set_colorspace():
''' Set colorspace following presets
'''
nuke_colorspace = get_colorspace_preset().get("nuke", None)
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
nuke.delete(v)
try:
set_root_colorspace(nuke_colorspace["root"])
except AttributeError:
log.error(
"set_colorspace(): missing `root` settings in template")
try:
set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
try:
set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
# create new viewer
nv = nuke.createNode("Viewer")
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
# connect to original inputs
for i, n in enumerate(copy_inputs):
nv.setInput(i, n)
# set copied knobs
for k, v in copy_knobs.items():
print(k, v)
nv[k].setValue(v)
def reset_frame_range_handles():
"""Set frame range to current asset"""
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
root = nuke.root()
name = api.Session["AVALON_ASSET"]
asset_entity = pype.get_asset(name)
if erased_viewers:
log.warning(
"Attention! Viewer nodes {} were erased."
"It had wrong color profile".format(erased_viewers))
def set_root_colorspace(self, root_dict):
''' Adds correct colorspace to root
Arguments:
root_dict (dict): adjustments from presets
'''
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
# first set OCIO
if self._root_node["colorManagement"].value() \
not in str(root_dict["colorManagement"]):
self._root_node["colorManagement"].setValue(
str(root_dict["colorManagement"]))
# second set ocio version
if self._root_node["OCIO_config"].value() \
not in str(root_dict["OCIO_config"]):
self._root_node["OCIO_config"].setValue(
str(root_dict["OCIO_config"]))
# then set the rest
for knob, value in root_dict.items():
if self._root_node[knob].value() not in value:
self._root_node[knob].setValue(str(value))
log.debug("nuke.root()['{}'] changed to: {}".format(
knob, value))
def set_writes_colorspace(self, write_dict):
''' Adds correct colorspace to write node dict
if "data" not in asset_entity:
msg = "Asset {} don't have set any 'data'".format(name)
@ -516,159 +568,210 @@ def reset_frame_range_handles():
nuke.message(msg)
return
data = asset_entity["data"]
Arguments:
write_dict (dict): nuke write node as dictionary
missing_cols = []
check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"]
'''
# TODO: complete this function so any write node in
# scene will have fixed colorspace following presets for the project
assert isinstance(write_dict, dict), log.error(
"set_writes_colorspace(): argument should be dictionary")
for col in check_cols:
if col not in data:
missing_cols.append(col)
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
if len(missing_cols) > 0:
missing = ", ".join(missing_cols)
msg = "'{}' are not set for asset '{}'!".format(missing, name)
log.warning(msg)
nuke.message(msg)
return
def set_colorspace(self):
''' Set colorspace following presets
'''
nuke_colorspace = get_colorspace_preset().get("nuke", None)
# get handles values
handle_start = asset_entity["data"]["handleStart"]
handle_end = asset_entity["data"]["handleEnd"]
fps = asset_entity["data"]["fps"]
frame_start = int(asset_entity["data"]["frameStart"]) - handle_start
frame_end = int(asset_entity["data"]["frameEnd"]) + handle_end
root["fps"].setValue(fps)
root["first_frame"].setValue(frame_start)
root["last_frame"].setValue(frame_end)
# setting active viewers
nuke.frame(int(asset_entity["data"]["frameStart"]))
range = '{0}-{1}'.format(
int(asset_entity["data"]["frameStart"]),
int(asset_entity["data"]["frameEnd"]))
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
# adding handle_start/end to root avalon knob
if not avalon.nuke.imprint(root, {
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def reset_resolution():
"""Set resolution to project resolution."""
log.info("Reseting resolution")
project = io.find_one({"type": "project"})
asset = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset, "type": "asset"})
asset_data = asset.get('data', {})
data = {
"width": int(asset_data.get(
'resolutionWidth',
asset_data.get('resolution_width'))),
"height": int(asset_data.get(
'resolutionHeight',
asset_data.get('resolution_height'))),
"pixel_aspect": asset_data.get(
'pixelAspect',
asset_data.get('pixel_aspect', 1)),
"name": project["name"]
}
if any(x for x in data.values() if x is None):
log.error(
"Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`".format(**data)
)
return
bbox = asset.get('data', {}).get('crop')
if bbox:
try:
x, y, r, t = bbox.split(".")
data.update(
{
"x": int(x),
"y": int(y),
"r": int(r),
"t": int(t),
}
)
except Exception as e:
bbox = None
self.set_root_colorspace(nuke_colorspace["root"])
except AttributeError:
log.error(
"{}: {} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default".format(__name__, e)
"set_colorspace(): missing `root` settings in template")
try:
self.set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
try:
self.set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
def reset_frame_range_handles(self):
"""Set frame range to current asset"""
if "data" not in self._asset_entity:
msg = "Asset {} don't have set any 'data'".format(self._asset)
log.warning(msg)
nuke.message(msg)
return
data = self._asset_entity["data"]
missing_cols = []
check_cols = ["fps", "frameStart", "frameEnd",
"handleStart", "handleEnd"]
for col in check_cols:
if col not in data:
missing_cols.append(col)
if len(missing_cols) > 0:
missing = ", ".join(missing_cols)
msg = "'{}' are not set for asset '{}'!".format(
missing, self._asset)
log.warning(msg)
nuke.message(msg)
return
# get handles values
handle_start = data["handleStart"]
handle_end = data["handleEnd"]
fps = data["fps"]
frame_start = int(data["frameStart"]) - handle_start
frame_end = int(data["frameEnd"]) + handle_end
self._root_node["fps"].setValue(fps)
self._root_node["first_frame"].setValue(frame_start)
self._root_node["last_frame"].setValue(frame_end)
# setting active viewers
try:
nuke.frame(int(data["frameStart"]))
except Exception as e:
log.warning("no viewer in scene: `{}`".format(e))
range = '{0}-{1}'.format(
int(data["frameStart"]),
int(data["frameEnd"]))
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
# adding handle_start/end to root avalon knob
if not avalon.nuke.imprint(self._root_node, {
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def reset_resolution(self):
"""Set resolution to project resolution."""
log.info("Reseting resolution")
project = io.find_one({"type": "project"})
asset = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset, "type": "asset"})
asset_data = asset.get('data', {})
data = {
"width": int(asset_data.get(
'resolutionWidth',
asset_data.get('resolution_width'))),
"height": int(asset_data.get(
'resolutionHeight',
asset_data.get('resolution_height'))),
"pixel_aspect": asset_data.get(
'pixelAspect',
asset_data.get('pixel_aspect', 1)),
"name": project["name"]
}
if any(x for x in data.values() if x is None):
log.error(
"Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`".format(**data)
)
existing_format = None
for format in nuke.formats():
if data["name"] == format.name():
existing_format = format
break
if existing_format:
# Enforce existing format to be correct.
existing_format.setWidth(data["width"])
existing_format.setHeight(data["height"])
existing_format.setPixelAspect(data["pixel_aspect"])
bbox = self._asset_entity.get('data', {}).get('crop')
if bbox:
existing_format.setX(data["x"])
existing_format.setY(data["y"])
existing_format.setR(data["r"])
existing_format.setT(data["t"])
else:
format_string = make_format_string(**data)
log.info("Creating new format: {}".format(format_string))
nuke.addFormat(format_string)
try:
x, y, r, t = bbox.split(".")
data.update(
{
"x": int(x),
"y": int(y),
"r": int(r),
"t": int(t),
}
)
except Exception as e:
bbox = None
log.error(
"{}: {} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default".format(__name__, e)
)
nuke.root()["format"].setValue(data["name"])
log.info("Format is set.")
existing_format = None
for format in nuke.formats():
if data["name"] == format.name():
existing_format = format
break
if existing_format:
# Enforce existing format to be correct.
existing_format.setWidth(data["width"])
existing_format.setHeight(data["height"])
existing_format.setPixelAspect(data["pixel_aspect"])
def make_format_string(**kwargs):
if kwargs.get("r"):
return (
"{width} "
"{height} "
"{x} "
"{y} "
"{r} "
"{t} "
"{pixel_aspect:.2f} "
"{name}".format(**kwargs)
)
else:
return (
"{width} "
"{height} "
"{pixel_aspect:.2f} "
"{name}".format(**kwargs)
)
if bbox:
existing_format.setX(data["x"])
existing_format.setY(data["y"])
existing_format.setR(data["r"])
existing_format.setT(data["t"])
else:
format_string = self.make_format_string(**data)
log.info("Creating new format: {}".format(format_string))
nuke.addFormat(format_string)
nuke.root()["format"].setValue(data["name"])
log.info("Format is set.")
def set_context_settings():
# replace reset resolution from avalon core to pype's
reset_resolution()
# replace reset frame range from avalon core to pype's
reset_frame_range_handles()
# add colorspace menu item
set_colorspace()
def make_format_string(self, **kwargs):
if kwargs.get("r"):
return (
"{width} "
"{height} "
"{x} "
"{y} "
"{r} "
"{t} "
"{pixel_aspect:.2f} "
"{name}".format(**kwargs)
)
else:
return (
"{width} "
"{height} "
"{pixel_aspect:.2f} "
"{name}".format(**kwargs)
)
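For illustration, with hypothetical values the method produces Nuke format strings like these (assumes an active session for the constructor):

ws = WorkfileSettings()
print(ws.make_format_string(width=1920, height=1080,
                            pixel_aspect=1.0, name="myProject"))
# -> "1920 1080 1.00 myProject"
# with crop keys x, y, r, t it would yield e.g.
# "1920 1080 0 0 1920 1080 1.00 myProject"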
def set_context_settings(self):
# replace reset resolution from avalon core to pype's
self.reset_resolution()
# replace reset frame range from avalon core to pype's
self.reset_frame_range_handles()
# add colorspace menu item
self.set_colorspace()
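The class replaces the former module-level helpers; a minimal call, assuming an open Nuke script with Avalon context:

from pype.nuke.lib import WorkfileSettings
# apply resolution, frame ranges/handles and colorspace to the root node
WorkfileSettings().set_context_settings()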
def get_hierarchical_attr(entity, attr, default=None):
@ -726,3 +829,297 @@ def get_write_node_template_attr(node):
# fix badly encoded data
return avalon.nuke.lib.fix_data_for_node_create(correct_data)
class BuildWorkfile(WorkfileSettings):
"""
Builds the first version of a workfile.
Settings are taken from presets and the database. All subsets are added
at their latest version for the defined representations.
Arguments:
root_path (str): optional projects root; defaults to AVALON_PROJECTS
root_node (nuke.Node): optional root node; defaults to nuke.root()
nodes (list): optional list of nuke.Node
to_script (str): if set, skip saving the script in `process()`
"""
xpos = 0
ypos = 0
xpos_size = 80
ypos_size = 90
xpos_gap = 50
ypos_gap = 50
pos_layer = 10
def __init__(self,
root_path=None,
root_node=None,
nodes=None,
to_script=None,
**kwargs):
"""
Initialize the workfile builder.
Collects project and asset data, formats the anatomy templates and
derives the work directory and workfile name.
Arguments:
root_path (str): projects root; defaults to AVALON_PROJECTS
root_node (nuke.Node): root node; defaults to nuke.root()
nodes (list): list of nuke.Node
nodes_effects (dict): dictionary with subsets
Example:
nodes_effects = {
"plateMain": {
"nodes": [
[("Class", "Reformat"),
("resize", "distort"),
("flip", True)],
[("Class", "Grade"),
("blackpoint", 0.5),
("multiply", 0.4)]
]
},
}
"""
WorkfileSettings.__init__(self,
root_node=root_node,
nodes=nodes,
**kwargs)
self.to_script = to_script
# collect data for formatting
data = {
"root": root_path or api.Session["AVALON_PROJECTS"],
"project": {"name": self._project["name"],
"code": self._project["data"].get("code", '')},
"asset": self._asset or os.environ["AVALON_ASSET"],
"task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
"hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
"version": kwargs.get("version", {}).get("name", 1),
"user": getpass.getuser(),
"comment": "firstBuild"
}
# get presets from anatomy
anatomy = get_anatomy()
# format anatomy
anatomy_filled = anatomy.format(data)
# get dir and file for workfile
self.work_dir = anatomy_filled["avalon"]["work"]
self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk"
def save_script_as(self, path=None):
# first clear anything in open window
nuke.scriptClear()
if not path:
dir = self.work_dir
path = os.path.join(
self.work_dir,
self.work_file).replace("\\", "/")
else:
dir = os.path.dirname(path)
# check if folder is created
if not os.path.exists(dir):
os.makedirs(dir)
# save script to path
nuke.scriptSaveAs(path)
def process(self,
regex_filter=None,
version=None,
representations=["exr", "dpx", "lutJson"]):
"""
Build the workfile content.
Saves the script (unless `to_script` is set), creates a Viewer and a
render Write node, then loads all matching subsets as Read nodes and
attaches any matching lut effects.
Args:
regex_filter (raw str): regex pattern to filter subsets by name
version (int): particular version; None gets the latest
representations (list): representation names to load
Returns:
None
"""
if not self.to_script:
# save the script
self.save_script_as()
# create viewer and reset frame range
viewer = self.get_nodes(nodes_filter=["Viewer"])
if not viewer:
vn = nuke.createNode("Viewer")
vn["xpos"].setValue(self.xpos)
vn["ypos"].setValue(self.ypos)
else:
vn = viewer[-1]
# move position
self.position_up()
wn = self.write_create()
wn["xpos"].setValue(self.xpos)
wn["ypos"].setValue(self.ypos)
wn["render"].setValue(True)
vn.setInput(0, wn)
bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
color='0xcc1102ff', layer=-1,
nodes=[wn])
# move position
self.position_up(4)
# set frame range for new viewer
self.reset_frame_range_handles()
# get all available representations
subsets = pype.get_subsets(self._asset,
regex_filter=regex_filter,
version=version,
representations=representations)
nodes_backdrop = list()
for name, subset in subsets.items():
if "lut" in name:
continue
log.info("Building Loader to: `{}`".format(name))
version = subset["version"]
log.info("Version to: `{}`".format(version["name"]))
representations = subset["representaions"]
for repr in representations:
rn = self.read_loader(repr)
rn["xpos"].setValue(self.xpos)
rn["ypos"].setValue(self.ypos)
wn.setInput(0, rn)
# get additional nodes
lut_subset = [s for n, s in subsets.items()
if "lut{}".format(name.lower()) in n.lower()]
log.debug(">> lut_subset: `{}`".format(lut_subset))
if len(lut_subset) > 0:
lsub = lut_subset[0]
fxn = self.effect_loader(lsub["representaions"][-1])
fxn_ypos = fxn["ypos"].value()
fxn["ypos"].setValue(fxn_ypos - 100)
nodes_backdrop.append(fxn)
nodes_backdrop.append(rn)
# move position
self.position_right()
bdn = self.create_backdrop(label="Loaded Reads",
color='0x2d7702ff', layer=-1,
nodes=nodes_backdrop)
def read_loader(self, representation):
"""
Finds the Loader plugin for an image sequence or mov and loads the representation
Arguments:
representation (dict): avalon db entity
"""
context = representation["context"]
loader_name = "LoadSequence"
if "mov" in context["representation"]:
loader_name = "LoadMov"
loader_plugin = None
for Loader in api.discover(api.Loader):
if Loader.__name__ != loader_name:
continue
loader_plugin = Loader
return api.load(Loader=loader_plugin,
representation=representation["_id"])
def effect_loader(self, representation):
"""
Finds the Loader plugin for lut effects and loads the representation
Arguments:
representation (dict): avalon db entity
"""
context = representation["context"]
loader_name = "LoadLuts"
loader_plugin = None
for Loader in api.discover(api.Loader):
if Loader.__name__ != loader_name:
continue
loader_plugin = Loader
return api.load(Loader=loader_plugin,
representation=representation["_id"])
def write_create(self):
"""
Create a render Write node via the `CreateWriteRender` creator plugin.
"""
Create_name = "CreateWriteRender"
creator_plugin = None
for Creator in api.discover(api.Creator):
if Creator.__name__ != Create_name:
continue
creator_plugin = Creator
# return api.create()
return creator_plugin("render_writeMain", self._asset).process()
def create_backdrop(self, label="", color=None, layer=0,
nodes=None):
"""
Create Backdrop node
Arguments:
color (str): nuke compatible string with color code
layer (int): layer of node usually used (self.pos_layer - 1)
label (str): the message
nodes (list): list of nodes to be wrapped into backdrop
"""
assert isinstance(nodes, list), "`nodes` should be a list of nodes"
layer = self.pos_layer + layer
create_backdrop(label=label, color=color, layer=layer, nodes=nodes)
def position_reset(self, xpos=0, ypos=0):
self.xpos = xpos
self.ypos = ypos
def position_right(self, multiply=1):
self.xpos += (self.xpos_size * multiply) + self.xpos_gap
def position_left(self, multiply=1):
self.xpos -= (self.xpos_size * multiply) + self.xpos_gap
def position_down(self, multiply=1):
self.ypos += (self.ypos_size * multiply) + self.ypos_gap
def position_up(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
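A minimal end-to-end sketch matching the menu wiring further below (an active Avalon/Nuke session is assumed; the filter is hypothetical):

from pype.nuke.lib import BuildWorkfile
BuildWorkfile().process(regex_filter=r"plate.*",
                        representations=["exr", "dpx", "lutJson"])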

View file

@ -9,7 +9,7 @@ log = Logger().get_logger(__name__, "nuke")
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
workfile_settings = lib.WorkfileSettings()
# replace reset resolution from avalon core to pype's
name = "Reset Resolution"
new_name = "Set Resolution"
@ -20,7 +20,7 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lib.reset_resolution, index=(rm_item[0]))
menu.addCommand(new_name, workfile_settings.reset_resolution, index=(rm_item[0]))
# replace reset frame range from avalon core to pype's
name = "Reset Frame Range"
@ -31,20 +31,28 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lib.reset_frame_range_handles, index=(rm_item[0]))
menu.addCommand(new_name, workfile_settings.reset_frame_range_handles, index=(rm_item[0]))
# add colorspace menu item
name = "Set colorspace"
menu.addCommand(
name, lib.set_colorspace,
name, workfile_settings.set_colorspace,
index=(rm_item[0]+2)
)
log.debug("Adding menu item: {}".format(name))
# add workfile builder menu item
name = "Build First Workfile.."
menu.addCommand(
name, lib.BuildWorkfile().process,
index=(rm_item[0]+7)
)
log.debug("Adding menu item: {}".format(name))
# add item that applies all setting above
name = "Apply all settings"
menu.addCommand(
name, lib.set_context_settings, index=(rm_item[0]+3)
name, workfile_settings.set_context_settings, index=(rm_item[0]+3)
)
log.debug("Adding menu item: {}".format(name))

View file

@ -189,7 +189,7 @@ def add_submission():
class PublishAction(QtWidgets.QAction):
"""
Action which is shown as a menu item
Action which is shown as a menu item
"""
def __init__(self):
@ -287,3 +287,59 @@ def _show_no_gui():
messagebox.setStandardButtons(messagebox.Ok)
messagebox.exec_()
def CreateNukeWorkfile(nodes=None,
nodes_effects=None,
to_timeline=False,
filepath=None,
representations=None,
**kwargs):
''' Create a nuke workfile of a particular version from the given nodes.
Also creates timeline track items as precomps.
Arguments:
nodes (list of dict): knobs per node; key order is important
to_timeline (bool): if True, build trackItems with metadata
filepath (str): target path for the workfile
representations (list): representation names to build from
Returns:
bool: True if done
Raises:
Exception: with traceback
'''
import hiero.core
from avalon.nuke import imprint
from pype.nuke import (
lib as nklib
)
# check if the file exists; if it does, raise "File exists!"
if os.path.exists(filepath):
raise FileExistsError("File already exists: `{}`".format(filepath))
# if no representations match then
# raise "no representations to be built"
if not representations:
raise AttributeError("Missing list of `representations`")
# check nodes input
if not nodes:
log.warning("Missing list of `nodes`")
# create temp nk file
nuke_script = hiero.core.nuke.ScriptWriter()
# create root node and save all metadata
root_node = hiero.core.nuke.RootNode()
root_path = os.environ["AVALON_PROJECTS"]
nuke_script.addNode(root_node)
# here to call pype.nuke.lib.BuildWorkfile
script_builder = nklib.BuildWorkfile(
root_node=root_node,
root_path=root_path,
nodes=nuke_script.getNodes(),
**kwargs
)
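A hedged call sketch; the path and representation list are hypothetical, and `filepath`/`representations` must be passed explicitly because the body checks them before building:

CreateNukeWorkfile(nodes=[],
                   filepath="/projects/demo/sh010_comp_v001.nk",
                   representations=["exr", "dpx"])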

View file

@ -2,11 +2,11 @@ import pyblish.api
from avalon import io
class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
"""Create entities in Avalon based on collected data."""
order = pyblish.api.IntegratorOrder - 0.1
label = "Integrate Hierarchy To Avalon"
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Hierarchy To Avalon"
families = ["clip", "shot"]
def process(self, context):

View file

@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"audio"
]
exclude_families = ["clip"]

View file

@ -69,11 +69,11 @@ class CreateWriteRender(avalon.nuke.Creator):
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
node = create_write_node(self.data["subset"], write_data)
# Deadline tab.
add_deadline_tab(node)
return create_write_node(self.data["subset"], write_data)
class CreateWritePrerender(avalon.nuke.Creator):
# change this to template preset

View file

@ -0,0 +1,317 @@
from avalon import api, style, io
import nuke
import json
from collections import OrderedDict
class LoadLuts(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
label = "Load Luts - nodes"
order = 0
icon = "cc"
color = style.colors.light
def load(self, context, name, namespace, data):
"""
Loading function to attach the soft effects to a particular read node
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# import dependencies
from avalon.nuke import containerise
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to find parent read node
self.connect_read_node(GN, namespace, json_f["assignTo"])
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file; these automatic changes are
applied to its inputs.
"""
from avalon.nuke import (
update_container
)
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# Update the imprinted representation
update_container(
GN,
data_imprint
)
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# adding content to the group node
with GN:
# first remove all nodes
[nuke.delete(n) for n in nuke.allNodes()]
# create input node
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if isinstance(v, list) and len(v) > 3:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
# create output node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to find parent read node
self.connect_read_node(GN, namespace, json_f["assignTo"])
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd84f20ff", 16))
else:
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("udated to version: {}".format(version.get("name")))
def connect_read_node(self, group_node, asset, subset):
"""
Finds the parent read node and connects the group node to it
Arguments:
group_node (nuke node): effect group node to connect
asset (str): asset name
subset (str): subset name
"""
search_name = "{0}_{1}".format(asset, subset)
node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
if len(node) > 0:
rn = node[0]
else:
rn = None
# Parent read node has been found
# solving connections
if rn:
dep_nodes = rn.dependent()
if len(dep_nodes) > 0:
for dn in dep_nodes:
dn.setInput(0, group_node)
group_node.setInput(0, rn)
group_node.autoplace()
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
for subTrackIndex in range(
min(subTrackNums), max(subTrackNums) + 1):
item = self.get_item(data, trackIndex, subTrackIndex)
if item:
new_order.update(item)
return new_order
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Converts unicode strings to byte strings,
recursing through all dictionaries and lists
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -0,0 +1,330 @@
from avalon import api, style, io
import nuke
import json
from collections import OrderedDict
from pype.nuke import lib
class LoadLutsInputProcess(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
label = "Load Luts - Input Process"
order = 0
icon = "eye"
color = style.colors.alert
def load(self, context, name, namespace, data):
"""
Loading function to attach the soft effects to a particular read node
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# import dependencies
from avalon.nuke import containerise
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
return
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file; these automatic changes are
applied to its inputs.
"""
from avalon.nuke import (
update_container
)
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# Update the imprinted representation
update_container(
GN,
data_imprint
)
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# adding content to the group node
with GN:
# first remove all nodes
[nuke.delete(n) for n in nuke.allNodes()]
# create input node
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if isinstance(v, list) and len(v) > 3:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
# create output node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
return
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd84f20ff", 16))
else:
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("udated to version: {}".format(version.get("name")))
def connect_active_viewer(self, group_node):
"""
Finds the active viewer, places the group node under it and adds
the group's name to the viewer's Input Process
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
self.log.error("Please create Viewer node before you run this action again")
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
lib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
for subTrackIndex in range(
min(subTrackNums), max(subTrackNums) + 1):
item = self.get_item(data, trackIndex, subTrackIndex)
if item:
new_order.update(item)
return new_order
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Converts unicode strings to byte strings,
recursing through all dictionaries and lists
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -101,7 +101,8 @@ class LoadMov(api.Loader):
handles = version_data.get("handles", None)
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
repr_cont = context["representation"]["context"]
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
@ -119,9 +120,11 @@ class LoadMov(api.Loader):
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
read_name = "Read"
read_name += '_' + context["representation"]["context"]["subset"]
read_name += '_' + context["representation"]["name"]
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():

View file

@ -96,6 +96,8 @@ class LoadSequence(api.Loader):
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
@ -104,10 +106,17 @@ class LoadSequence(api.Loader):
if namespace is None:
namespace = context['asset']['name']
first -= self.handle_start
last += self.handle_end
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
read_name = "Read_" + context["representation"]["context"]["subset"]
repr_cont = context["representation"]["context"]
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@ -227,7 +236,8 @@ class LoadSequence(api.Loader):
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
@ -237,6 +247,9 @@ class LoadSequence(api.Loader):
"{} ({})".format(node['name'].value(), representation))
first = 0
first -= self.handle_start
last += self.handle_end
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])

View file

@ -20,83 +20,114 @@ class CollectClips(api.ContextPlugin):
projectdata = context.data["projectData"]
version = context.data.get("version", "001")
instances_data = []
for item in context.data.get("selection", []):
# Skip audio track items
# Try/Except is to handle items types, like EffectTrackItem
try:
media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
if str(item.mediaType()) != media_type:
sequence = context.data.get("activeSequence")
selection = context.data.get("selection")
track_effects = dict()
# collect all trackItems as instances
for track_index, video_track in enumerate(sequence.videoTracks()):
items = video_track.items()
sub_items = video_track.subTrackItems()
for item in items:
# compare with selection or if disabled
if item not in selection or not item.isEnabled():
continue
except:
# Skip audio track items
# Try/Except is to handle items types, like EffectTrackItem
try:
media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
if str(item.mediaType()) != media_type:
continue
except Exception:
continue
asset = item.name()
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
effects = [f for f in item.linkedItems() if f.isEnabled()]
# If source is *.nk its a comp effect and we need to fetch the
# write node output. This should be improved by parsing the script
# rather than opening it.
if source_path.endswith(".nk"):
nuke.scriptOpen(source_path)
# There should only be one.
write_node = nuke.allNodes(filter="Write")[0]
path = nuke.filename(write_node)
if "%" in path:
# Get start frame from Nuke script and use the item source
# in/out, because you can have multiple shots covered with
# one nuke script.
start_frame = int(nuke.root()["first_frame"].getValue())
if write_node["use_limit"].getValue():
start_frame = int(write_node["first"].getValue())
path = path % (start_frame + item.sourceIn())
source_path = path
self.log.debug(
"Fetched source path \"{}\" from \"{}\" in "
"\"{}\".".format(
source_path, write_node.name(), source.firstpath()
)
)
try:
head, padding, ext = os.path.basename(source_path).split(".")
source_first_frame = int(padding)
except Exception:
source_first_frame = 0
data = {"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"sourcePath": source_path,
"track": track.name(),
"trackIndex": track_index,
"sourceFirst": source_first_frame,
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut()),
"asset": asset,
"family": "clip",
"families": [],
"handles": 0,
"handleStart": projectdata.get("handles", 0),
"handleEnd": projectdata.get("handles", 0),
"version": int(version)}
instance = context.create_instance(**data)
self.log.info("Created instance: {}".format(instance))
self.log.debug(">> effects: {}".format(instance.data["effects"]))
context.data["assetsShared"][asset] = dict()
# from now on we are collecting only subTrackItems on
# tracks with no video items
if len(items) > 0:
continue
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
# create list in track key
# get all subTrackItems and add it to context
track_effects[track_index] = list()
# If source is *.nk its a comp effect and we need to fetch the
# write node output. This should be improved by parsing the script
# rather than opening it.
if source_path.endswith(".nk"):
nuke.scriptOpen(source_path)
# There should only be one.
write_node = nuke.allNodes(filter="Write")[0]
path = nuke.filename(write_node)
# collect all subtrack items
for sitem in sub_items:
# unwrap from tuple >> it is always a tuple with one item
sitem = sitem[0]
# checking if not enabled
if not sitem.isEnabled():
continue
if "%" in path:
# Get start frame from Nuke script and use the item source
# in/out, because you can have multiple shots covered with
# one nuke script.
start_frame = int(nuke.root()["first_frame"].getValue())
if write_node["use_limit"].getValue():
start_frame = int(write_node["first"].getValue())
track_effects[track_index].append(sitem)
path = path % (start_frame + item.sourceIn())
source_path = path
self.log.debug(
"Fetched source path \"{}\" from \"{}\" in "
"\"{}\".".format(
source_path, write_node.name(), source.firstpath()
)
)
try:
head, padding, ext = os.path.basename(source_path).split(".")
source_first_frame = int(padding)
except:
source_first_frame = 0
instances_data.append(
{
"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"sourcePath": source_path,
"track": track.name(),
"sourceFirst": source_first_frame,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut())
}
)
for data in instances_data:
data.update(
{
"asset": data["item"].name(),
"family": "clip",
"families": [],
"handles": 0,
"handleStart": projectdata.get("handles", 0),
"handleEnd": projectdata.get("handles", 0),
"version": int(version)
}
)
instance = context.create_instance(**data)
self.log.debug(
"Created instance with data: {}".format(instance.data)
)
context.data["assetsShared"][data["asset"]] = dict()
context.data["trackEffects"] = track_effects
self.log.debug(">> sub_track_items: `{}`".format(track_effects))

View file

@ -0,0 +1,96 @@
import pyblish.api
import re
class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
"""Collect video tracks effects into context."""
order = pyblish.api.CollectorOrder + 0.1015
label = "Collect Soft Lut Effects"
families = ["clip"]
def process(self, instance):
self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
# taking active sequence
subset = instance.data["subset"]
track_effects = instance.context.data.get("trackEffects", {})
track_index = instance.data["trackIndex"]
effects = instance.data["effects"]
# creating context attribute
self.effects = {"assignTo": subset, "effects": dict()}
for sitem in effects:
self.add_effect(instance, track_index, sitem)
for t_index, sitems in track_effects.items():
for sitem in sitems:
if t_index <= track_index:
continue
self.log.debug(">> sitem: `{}`".format(sitem))
self.add_effect(instance, t_index, sitem)
if self.effects["effects"]:
instance.data["effectTrackItems"] = self.effects
if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
instance.data["families"] += ["lut"]
self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))
def add_effect(self, instance, track_index, item):
track = item.parentTrack().name()
# node serialization
node = item.node()
node_serialized = self.node_serialisation(instance, node)
# collect timelineIn/Out
effect_t_in = int(item.timelineIn())
effect_t_out = int(item.timelineOut())
node_name = item.name()
node_class = re.sub(r"\d+", "", node_name)
self.effects["effects"].update({node_name: {
"class": node_class,
"timelineIn": effect_t_in,
"timelineOut": effect_t_out,
"subTrackIndex": item.subTrackIndex(),
"trackIndex": track_index,
"track": track,
"node": node_serialized
}})
def node_serialisation(self, instance, node):
node_serialized = {}
timeline_in_h = instance.data["clipInH"]
timeline_out_h = instance.data["clipOutH"]
# adding ignoring knob keys
_ignoring_keys = ['invert_mask', 'help', 'mask',
'xpos', 'ypos', 'layer', 'process_mask', 'channel',
'channels', 'maskChannelMask', 'maskChannelInput',
'note_font', 'note_font_size', 'unpremult',
'postage_stamp_frame', 'maskChannel', 'export_cc',
'select_cccid', 'mix', 'version']
# loop through all knobs and collect those not ignored
# and with any value
for knob in node.knobs().keys():
# skip knobs in the ignore list
if knob in _ignoring_keys:
continue
# get animation if node is animated
if node[knob].isAnimated():
# grab animation including handles
knob_anim = [node[knob].getValueAt(i)
for i in range(timeline_in_h, timeline_out_h + 1)]
node_serialized[knob] = knob_anim
else:
node_serialized[knob] = node[knob].value()
return node_serialized
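To make the serialization above concrete, a hypothetical example of what `instance.data["effectTrackItems"]` might hold after this collector runs (all values illustrative):

effect_track_items = {
    "assignTo": "plateMain",          # subset the effects belong to
    "effects": {
        "Grade1": {
            "class": "Grade",         # node class derived by stripping digits
            "timelineIn": 1001,
            "timelineOut": 1050,
            "subTrackIndex": 0,
            "trackIndex": 2,
            "track": "fx",
            "node": {
                "white": [1.0, 1.02, 1.04],  # animated knob: one value per frame incl. handles
                "black": 0.0                 # static knob: single value
            }
        }
    }
}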

View file

@ -38,6 +38,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
sequence = context.data['activeSequence']
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# build data for inner nukestudio project property
data = {
@ -157,6 +161,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"width": width,
"height": height,
"pixelAspect": pixel_aspect,
"tasks": instance.data["tasks"]
})
@ -191,7 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
def process(self, context):
instances = context[:]
sequence = context.data['activeSequence']
# create hierarchyContext attr if context has none
temp_context = {}
@ -216,6 +223,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["width"] = s_asset_data["width"]
instance.data["height"] = s_asset_data["height"]
instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
@ -265,16 +275,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding SourceResolution if Tag was present
if instance.data.get("main"):
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
width, height, pixel_aspect))
in_info['custom_attributes'].update({
"resolutionWidth": width,
"resolutionHeight": height,
"pixelAspect": pixel_aspect
"resolutionWidth": instance.data["width"],
"resolutionHeight": instance.data["height"],
"pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']
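The resolution is now read from the active sequence once, during hierarchy collection, instead of being re-read per instance. A minimal sketch of the hiero calls involved (assuming an active sequence is open):

import hiero.ui

sequence = hiero.ui.activeSequence()
fmt = sequence.format()
width = int(fmt.width())
height = int(fmt.height())
pixel_aspect = fmt.pixelAspect()  # float, e.g. 1.0 or 2.0 for anamorphic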

View file

@ -66,11 +66,14 @@ class CollectPlates(api.InstancePlugin):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
self.log.info("Source Width and Height are: `{0} x {1}`".format(
width, height))
pixel_aspect = item.source().mediaSource().pixelAspect()
self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
width, height, pixel_aspect))
data.update({
"width": width,
"height": height
"height": height,
"pixelAspect": pixel_aspect
})
self.log.debug("Creating instance with name: {}".format(data["name"]))
@ -123,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
"clipInH", "clipOutH", "asset", "track", "version"
"clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
]
# pass data to version
@ -133,6 +136,7 @@ class CollectPlatesData(api.InstancePlugin):
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
"subset": name,
"fps": instance.context.data["fps"]

View file

@ -14,12 +14,4 @@ class CollectSelection(pyblish.api.ContextPlugin):
self.log.debug("selection: {}".format(selection))
# if not selection:
# self.log.debug(
# "Nothing is selected. Collecting all items from sequence "
# "\"{}\"".format(hiero.ui.activeSequence())
# )
# for track in hiero.ui.activeSequence().items():
# selection.extend(track.items())
context.data["selection"] = selection

View file

@ -1,7 +1,7 @@
from pyblish import api
class CollectShots(api.ContextPlugin):
class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
@ -10,39 +10,39 @@ class CollectShots(api.ContextPlugin):
hosts = ["nukestudio"]
families = ["clip"]
def process(self, context):
for instance in context[:]:
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
if tag["name"].lower() == "hierarchy":
tagged = True
def process(self, instance):
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance))
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
if tag["name"].lower() == "hierarchy":
tagged = True
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance)
)
continue
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance)
)
return
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("_ context: {}".format(context[:]))
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
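The plugin above switches from `ContextPlugin` (one call, manual loop over `context[:]`) to `InstancePlugin`, where pyblish itself iterates the matching instances. The skeleton of the pattern, for reference (class name hypothetical):

import pyblish.api

class ExampleShotCollector(pyblish.api.InstancePlugin):
    """Called once per instance matching `families`; no manual loop needed."""
    order = pyblish.api.CollectorOrder + 0.02
    hosts = ["nukestudio"]
    families = ["clip"]

    def process(self, instance):
        # pyblish passes each matching instance; the context stays reachable
        self.log.debug("context: {}".format(instance.context[:]))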

View file

@ -19,13 +19,14 @@ class CollectClipTagFrameStart(api.InstancePlugin):
# gets only task family tags and collect labels
if "frameStart" in t_family:
t_value = t_metadata.get("tag.value", "")
t_value = t_metadata.get("tag.value", None)
# backward compatibility
t_number = t_metadata.get("tag.number", "")
t_number = t_metadata.get("tag.number", None)
start_frame = t_number or t_value
try:
start_frame = int(t_number) or int(t_value)
start_frame = int(start_frame)
except ValueError:
if "source" in t_value:
source_first = instance.data["sourceFirst"]
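The change from empty-string defaults to `None` also removes a failure mode: the old `int(t_number) or int(t_value)` raised as soon as one of the two was an empty string, even when the other held a usable value. An illustration with hypothetical metadata:

t_metadata = {"tag.value": "1001"}            # "tag.number" absent
t_number = t_metadata.get("tag.number", None)
t_value = t_metadata.get("tag.value", None)

start_frame = t_number or t_value             # falls through to "1001"
start_frame = int(start_frame)                # old int(t_number) would have raised first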

View file

@ -0,0 +1,231 @@
# from pype import plugins
import os
import json
import re
import pyblish.api
import tempfile
from avalon import io, api
class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
"""Collect video tracks effects into context."""
order = pyblish.api.ExtractorOrder
label = "Export Soft Lut Effects"
families = ["lut"]
def process(self, instance):
item = instance.data["item"]
effects = instance.data.get("effectTrackItems")
instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
# skip any without effects
if not effects:
return
subset = instance.data.get("subset")
subset_split = re.findall(r'[A-Z][^A-Z]*', subset)
if len(subset_split) > 0:
root_name = subset.replace(subset_split[0], "")
subset_split.insert(0, root_name.capitalize())
subset_split.insert(0, "lut")
self.log.debug("creating staging dir")
# staging_dir = self.staging_dir(instance)
# TODO: provisional only; to be replaced by a proper staging-dir function
staging_dir = instance.data.get('stagingDir', None)
if not staging_dir:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
self.log.debug("creating staging dir: `{}`".format(staging_dir))
transfers = list()
if "transfers" not in instance.data:
instance.data["transfers"] = list()
name = "".join(subset_split)
ext = "json"
file = name + "." + ext
# create new instance and inherit data
data = {}
for key, value in instance.data.iteritems():
data[key] = value
# change names
data["subset"] = name
data["family"] = "lut"
data["families"] = []
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(file)[1]
)
data["source"] = data["sourcePath"]
# create new instance
instance = instance.context.create_instance(**data)
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
for k, effect in effects["effects"].items():
trn = self.copy_linked_files(effect, dst_dir)
if trn:
transfers.append((trn[0], trn[1]))
instance.data["transfers"].extend(transfers)
self.log.debug("_ transfers: `{}`".format(
instance.data["transfers"]))
# create representations
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
]
# pass data to version
version_data = dict()
version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": ["plate", "lut"],
"subset": name,
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data
representation = {
'files': file,
'stagingDir': staging_dir,
'name': "lut" + ext.title(),
'ext': ext
}
instance.data["representations"].append(representation)
self.log.debug("_ representations: `{}`".format(
instance.data["representations"]))
self.log.debug("_ version_data: `{}`".format(
instance.data["versionData"]))
with open(os.path.join(staging_dir, file), "w") as outfile:
outfile.write(json.dumps(effects, indent=4, sort_keys=True))
return
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v is not '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")
# add it to the json
effect["node"][k] = dst
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset['silo']
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
# if there is a subset there ought to be a version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
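On the consuming side, the exported JSON can be turned back into Nuke nodes. A minimal sketch under the layout written above (function name hypothetical; animated knobs, stored as per-frame lists, would need curve handling that is omitted here):

import json

import nuke


def apply_lut_effects(json_path):
    with open(json_path) as f:
        data = json.load(f)
    for name, effect in data["effects"].items():
        node = nuke.createNode(effect["class"], inpanel=False)
        for knob, value in effect["node"].items():
            # only restore static knobs the node actually has
            if knob in node.knobs() and not isinstance(value, list):
                node[knob].setValue(value)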

Binary file not shown. (Before: 199 KiB, After: 5.9 KiB)

Binary file not shown. (Before: 215 KiB, After: 6.1 KiB)

Binary file not shown. (Before: 96 KiB, After: 10 KiB)

Binary file not shown. (Before: 194 KiB, After: 5.7 KiB)

Binary file not shown. (Before: 70 KiB, After: 7.8 KiB)

Binary file not shown. (Before: 70 KiB, After: 8.2 KiB)

Binary file not shown. (Before: 190 KiB, After: 8 KiB)

Binary file not shown. (Before: 73 KiB, After: 5.6 KiB)

Binary file not shown. (Before: 11 KiB, After: 5.1 KiB)

Binary file not shown. (Before: 103 KiB, After: 12 KiB)

Binary file not shown. (Before: 124 KiB, After: 13 KiB)

Binary file not shown. (Before: 13 KiB, After: 4.5 KiB)

Binary file not shown. (Before: 64 KiB, After: 7.5 KiB)

Binary file not shown. (Before: 7.6 KiB, After: 3.8 KiB)

Binary file not shown. (Before: 30 KiB, After: 8.1 KiB)

Binary file not shown. (Before: 8.7 KiB, After: 5.5 KiB)

Binary file not shown. (Before: 19 KiB, After: 4.7 KiB)

Binary file not shown. (Before: 87 KiB, After: 7.9 KiB)

Binary file not shown. (Before: 62 KiB, After: 7.4 KiB)