Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into feature/PYPE-612_blender

Commit bdd31bf81d
20 changed files with 1653 additions and 226 deletions
538  pype/ftrack/actions/action_delivery.py  (Normal file)
@@ -0,0 +1,538 @@
import os
import copy
import shutil
import collections
import string

import clique
from bson.objectid import ObjectId

from avalon import pipeline
from avalon.vendor import filelink
from avalon.tools.libraryloader.io_nonsingleton import DbConnector

from pypeapp import Anatomy
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import CustAttrIdKey


class Delivery(BaseAction):
    '''Deliver data to client action.'''

    #: Action identifier.
    identifier = "delivery.action"
    #: Action label.
    label = "Delivery"
    #: Action description.
    description = "Deliver data to client"
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project manager"]
    icon = '{}/ftrack/action_icons/Delivery.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )

    db_con = DbConnector()

    def discover(self, session, entities, event):
        ''' Validation '''
        for entity in entities:
            if entity.entity_type.lower() == "assetversion":
                return True

        return False

    def interface(self, session, entities, event):
        if event["data"].get("values", {}):
            return

        title = "Delivery data to Client"

        items = []
        item_splitter = {"type": "label", "value": "---"}

        # Prepare component names for processing
        components = None
        project = None
        for entity in entities:
            if project is None:
                project_id = None
                for ent_info in entity["link"]:
                    if ent_info["type"].lower() == "project":
                        project_id = ent_info["id"]
                        break

                if project_id is None:
                    project = entity["asset"]["parent"]["project"]
                else:
                    project = session.query((
                        "select id, full_name from Project where id is \"{}\""
                    ).format(project_id)).one()

            _components = set(
                [component["name"] for component in entity["components"]]
            )
            if components is None:
                components = _components
                continue

            components = components.intersection(_components)
            if not components:
                break

        project_name = project["full_name"]
        items.append({
            "type": "hidden",
            "name": "__project_name__",
            "value": project_name
        })

        # Prepare anatomy data
        anatomy = Anatomy(project_name)
        new_anatomies = []
        first = None
        for key in (anatomy.templates.get("delivery") or {}):
            new_anatomies.append({
                "label": key,
                "value": key
            })
            if first is None:
                first = key

        skipped = False
        # Add message if there are no common components
        if not components or not new_anatomies:
            skipped = True
            items.append({
                "type": "label",
                "value": "<h1>Something went wrong:</h1>"
            })

        items.append({
            "type": "hidden",
            "name": "__skipped__",
            "value": skipped
        })

        if not components:
            if len(entities) == 1:
                items.append({
                    "type": "label",
                    "value": (
                        "- Selected entity doesn't have components to deliver."
                    )
                })
            else:
                items.append({
                    "type": "label",
                    "value": (
                        "- Selected entities don't have common components."
                    )
                })

        # Add message if delivery anatomies are not set
        if not new_anatomies:
            items.append({
                "type": "label",
                "value": (
                    "- `\"delivery\"` anatomy key is not set in config."
                )
            })

        # Skip if there are any data shortcomings
        if skipped:
            return {
                "items": items,
                "title": title
            }

        items.append({
            "value": "<h1>Choose Components to deliver</h1>",
            "type": "label"
        })

        for component in components:
            items.append({
                "type": "boolean",
                "value": False,
                "label": component,
                "name": component
            })

        items.append(item_splitter)

        items.append({
            "value": "<h2>Location for delivery</h2>",
            "type": "label"
        })

        items.append({
            "type": "label",
            "value": (
                "<i>NOTE: It is possible to replace `root` key in anatomy.</i>"
            )
        })

        items.append({
            "type": "text",
            "name": "__location_path__",
            "empty_text": "Type location path here...(Optional)"
        })

        items.append(item_splitter)

        items.append({
            "value": "<h2>Anatomy of delivery files</h2>",
            "type": "label"
        })

        items.append({
            "type": "label",
            "value": (
                "<p><i>NOTE: These can be set in Anatomy.yaml"
                " within `delivery` key.</i></p>"
            )
        })

        items.append({
            "type": "enumerator",
            "name": "__new_anatomies__",
            "data": new_anatomies,
            "value": first
        })

        return {
            "items": items,
            "title": title
        }

    def launch(self, session, entities, event):
        if "values" not in event["data"]:
            return

        self.report_items = collections.defaultdict(list)

        values = event["data"]["values"]
        skipped = values.pop("__skipped__")
        if skipped:
            return None

        component_names = []
        location_path = values.pop("__location_path__")
        anatomy_name = values.pop("__new_anatomies__")
        project_name = values.pop("__project_name__")

        for key, value in values.items():
            if value is True:
                component_names.append(key)

        if not component_names:
            return {
                "success": True,
                "message": "No components selected to deliver."
            }

        location_path = location_path.strip()
        if location_path:
            location_path = os.path.normpath(location_path)
            if not os.path.exists(location_path):
                return {
                    "success": False,
                    "message": (
                        "Entered location path does not exist: \"{}\""
                    ).format(location_path)
                }

        self.db_con.install()
        self.db_con.Session["AVALON_PROJECT"] = project_name

        repres_to_deliver = []
        for entity in entities:
            asset = entity["asset"]
            subset_name = asset["name"]
            version = entity["version"]

            parent = asset["parent"]
            parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
            if parent_mongo_id:
                parent_mongo_id = ObjectId(parent_mongo_id)
            else:
                asset_ent = self.db_con.find_one({
                    "type": "asset",
                    "data.ftrackId": parent["id"]
                })
                if not asset_ent:
                    ent_path = "/".join(
                        [ent["name"] for ent in parent["link"]]
                    )
                    msg = "Entities not synchronized to avalon"
                    self.report_items[msg].append(ent_path)
                    self.log.warning("{} <{}>".format(msg, ent_path))
                    continue

                parent_mongo_id = asset_ent["_id"]

            subset_ent = self.db_con.find_one({
                "type": "subset",
                "parent": parent_mongo_id,
                "name": subset_name
            })

            version_ent = self.db_con.find_one({
                "type": "version",
                "name": version,
                "parent": subset_ent["_id"]
            })

            repre_ents = self.db_con.find({
                "type": "representation",
                "parent": version_ent["_id"]
            })

            repres_by_name = {}
            for repre in repre_ents:
                repre_name = repre["name"]
                repres_by_name[repre_name] = repre

            for component in entity["components"]:
                comp_name = component["name"]
                if comp_name not in component_names:
                    continue

                repre = repres_by_name.get(comp_name)
                repres_to_deliver.append(repre)

        if not location_path:
            location_path = os.environ.get("AVALON_PROJECTS") or ""

        self.log.debug(location_path)

        anatomy = Anatomy(project_name)
        for repre in repres_to_deliver:
            # Get destination repre path
            anatomy_data = copy.deepcopy(repre["context"])
            anatomy_data["root"] = location_path

            anatomy_filled = anatomy.format(anatomy_data)
            test_path = (
                anatomy_filled
                .get("delivery", {})
                .get(anatomy_name)
            )

            if not test_path:
                msg = (
                    "Missing keys in Representation's context"
                    " for anatomy template \"{}\"."
                ).format(anatomy_name)

                all_anatomies = anatomy.format_all(anatomy_data)
                result = None
                for anatomies in all_anatomies.values():
                    for key, temp in anatomies.get("delivery", {}).items():
                        if key != anatomy_name:
                            continue

                        result = temp
                        break

                # TODO log error! - missing keys in anatomy
                if result:
                    missing_keys = [
                        key[1] for key in string.Formatter().parse(result)
                        if key[1] is not None
                    ]
                else:
                    missing_keys = ["unknown"]

                keys = ", ".join(missing_keys)
                sub_msg = (
                    "Representation: {}<br>- Missing keys: \"{}\"<br>"
                ).format(str(repre["_id"]), keys)
                self.report_items[msg].append(sub_msg)
                self.log.warning(
                    "{} Representation: \"{}\" Filled: <{}>".format(
                        msg, str(repre["_id"]), str(result)
                    )
                )
                continue

            # Get source repre path
            frame = repre["context"].get("frame")

            if frame:
                repre["context"]["frame"] = len(str(frame)) * "#"

            repre_path = self.path_from_representation(repre)
            # TODO add backup solution where root of path from component
            # is replaced with AVALON_PROJECTS root
            if not frame:
                self.process_single_file(
                    repre_path, anatomy, anatomy_name, anatomy_data
                )
            else:
                self.process_sequence(
                    repre_path, anatomy, anatomy_name, anatomy_data
                )

        self.db_con.uninstall()

        return self.report()

    def process_single_file(
        self, repre_path, anatomy, anatomy_name, anatomy_data
    ):
        anatomy_filled = anatomy.format(anatomy_data)
        delivery_path = anatomy_filled["delivery"][anatomy_name]
        delivery_folder = os.path.dirname(delivery_path)
        if not os.path.exists(delivery_folder):
            os.makedirs(delivery_folder)

        self.copy_file(repre_path, delivery_path)

    def process_sequence(
        self, repre_path, anatomy, anatomy_name, anatomy_data
    ):
        dir_path, file_name = os.path.split(str(repre_path))

        base_name, ext = os.path.splitext(file_name)
        file_name_items = None
        if "#" in base_name:
            file_name_items = [part for part in base_name.split("#") if part]
        elif "%" in base_name:
            file_name_items = base_name.split("%")

        if not file_name_items:
            msg = "Source file was not found"
            self.report_items[msg].append(repre_path)
            self.log.warning("{} <{}>".format(msg, repre_path))
            return

        src_collections, remainder = clique.assemble(os.listdir(dir_path))
        src_collection = None
        for col in src_collections:
            if col.tail != ext:
                continue

            # skip if the collection doesn't share the same basename
            if not col.head.startswith(file_name_items[0]):
                continue

            src_collection = col
            break

        if src_collection is None:
            # TODO log error!
            msg = "Source collection of files was not found"
            self.report_items[msg].append(repre_path)
            self.log.warning("{} <{}>".format(msg, repre_path))
            return

        frame_indicator = "@####@"

        anatomy_data["frame"] = frame_indicator
        anatomy_filled = anatomy.format(anatomy_data)

        delivery_path = anatomy_filled["delivery"][anatomy_name]
        self.log.debug(delivery_path)
        delivery_folder = os.path.dirname(delivery_path)
        dst_head, dst_tail = delivery_path.split(frame_indicator)
        dst_padding = src_collection.padding
        dst_collection = clique.Collection(
            head=dst_head,
            tail=dst_tail,
            padding=dst_padding
        )

        if not os.path.exists(delivery_folder):
            os.makedirs(delivery_folder)

        src_head = src_collection.head
        src_tail = src_collection.tail
        for index in src_collection.indexes:
            src_padding = src_collection.format("{padding}") % index
            src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
            src = os.path.normpath(
                os.path.join(dir_path, src_file_name)
            )

            dst_padding = dst_collection.format("{padding}") % index
            dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)

            self.copy_file(src, dst)

    def path_from_representation(self, representation):
        try:
            template = representation["data"]["template"]
        except KeyError:
            return None

        try:
            context = representation["context"]
            context["root"] = os.environ.get("AVALON_PROJECTS") or ""
            path = pipeline.format_template_with_optional_keys(
                context, template
            )
        except KeyError:
            # Template references unavailable data
            return None

        return os.path.normpath(path)

    def copy_file(self, src_path, dst_path):
        if os.path.exists(dst_path):
            return
        try:
            filelink.create(
                src_path,
                dst_path,
                filelink.HARDLINK
            )
        except OSError:
            shutil.copyfile(src_path, dst_path)

    def report(self):
        items = []
        title = "Delivery report"
        for msg, _items in self.report_items.items():
            if not _items:
                continue

            if items:
                items.append({"type": "label", "value": "---"})

            items.append({
                "type": "label",
                "value": "# {}".format(msg)
            })
            if not isinstance(_items, (list, tuple)):
                _items = [_items]
            __items = []
            for item in _items:
                __items.append(str(item))

            items.append({
                "type": "label",
                "value": '<p>{}</p>'.format("<br>".join(__items))
            })

        if not items:
            return {
                "success": True,
                "message": "Delivery Finished"
            }

        return {
            "items": items,
            "title": title,
            "success": False,
            "message": "Delivery Finished"
        }


def register(session, plugins_presets={}):
    '''Register plugin. Called when used as a plugin.'''

    Delivery(session, plugins_presets).register()
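A minimal sketch (not part of the commit) of the sequence mapping that process_sequence() above performs with clique: assemble the source files into a collection, build a destination collection with the same padding, and pair frames by index. The file names and the destination head/tail are illustrative stand-ins for what Anatomy fills in around the "@####@" frame indicator.

    import clique

    src_files = [
        "shot010_comp.0001.exr",
        "shot010_comp.0002.exr",
        "shot010_comp.0003.exr",
    ]
    # assemble() splits files into frame collections plus a remainder
    src_collections, remainder = clique.assemble(src_files)
    src_collection = src_collections[0]

    # destination head/tail as Anatomy would fill them around "@####@"
    dst_head, dst_tail = "/deliveries/shot010_comp.", ".exr"
    dst_collection = clique.Collection(
        head=dst_head, tail=dst_tail, padding=src_collection.padding
    )

    for index in src_collection.indexes:
        # "{padding}" renders the printf-style pattern, e.g. "%04d"
        src_name = "{}{}{}".format(
            src_collection.head,
            src_collection.format("{padding}") % index,
            src_collection.tail,
        )
        dst_name = "{}{}{}".format(
            dst_head, dst_collection.format("{padding}") % index, dst_tail
        )
        print(src_name, "->", dst_name)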
@@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent):
     ignore_entTypes = [
         "socialfeed", "socialnotification", "note",
         "assetversion", "job", "user", "reviewsessionobject", "timer",
-        "timelog", "auth_userrole"
+        "timelog", "auth_userrole", "appointment"
     ]
     ignore_ent_types = ["Milestone"]
     ignore_keys = ["statusid"]
@@ -1438,9 +1438,11 @@ class SyncToAvalonEvent(BaseEvent):
             if attr["entity_type"] != ent_info["entityType"]:
                 continue

-            if ent_info["entityType"] != "show":
-                if attr["object_type_id"] != ent_info["objectTypeId"]:
-                    continue
+            if (
+                ent_info["entityType"] == "task" and
+                attr["object_type_id"] != ent_info["objectTypeId"]
+            ):
+                continue

             configuration_id = attr["id"]
             entity_type_conf_ids[entity_type] = configuration_id
@@ -1712,7 +1714,8 @@ class SyncToAvalonEvent(BaseEvent):

             if ca_ent_type == "show":
                 cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr
-            else:
+
+            elif ca_ent_type == "task":
                 obj_id = cust_attr["object_type_id"]
                 cust_attrs_by_obj_id[obj_id][key] = cust_attr
@@ -314,6 +314,9 @@ class SyncEntitiesFactory:
             self.log.warning(msg)
             return {"success": False, "message": msg}

+        self.log.debug((
+            "*** Synchronization initialization started <{}>."
+        ).format(project_full_name))
         # Check if `avalon_mongo_id` custom attribute exists or is accessible
         if CustAttrIdKey not in ft_project["custom_attributes"]:
             items = []
@@ -699,7 +702,7 @@ class SyncEntitiesFactory:
             if ca_ent_type == "show":
                 avalon_attrs[ca_ent_type][key] = cust_attr["default"]
                 avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"]
-            else:
+            elif ca_ent_type == "task":
                 obj_id = cust_attr["object_type_id"]
                 avalon_attrs[obj_id][key] = cust_attr["default"]
                 avalon_attrs_ca_id[obj_id][key] = cust_attr["id"]
@@ -708,7 +711,7 @@ class SyncEntitiesFactory:
             if ca_ent_type == "show":
                 attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"]
                 attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"]
-            else:
+            elif ca_ent_type == "task":
                 obj_id = cust_attr["object_type_id"]
                 attrs_per_entity_type[obj_id][key] = cust_attr["default"]
                 attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"]
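The show/task branching repeated in the three hunks above follows one bucketing pattern: project-level ("show") custom attributes are keyed by the entity type itself, while task attributes are grouped by their object_type_id. A runnable sketch (not part of the commit) with illustrative data:

    import collections

    cust_attrs = [
        {"key": "fps", "entity_type": "show", "id": "ca1"},
        {"key": "frameStart", "entity_type": "task",
         "object_type_id": "obj1", "id": "ca2"},
    ]

    by_obj_id = collections.defaultdict(dict)
    for cust_attr in cust_attrs:
        ca_ent_type = cust_attr["entity_type"]
        if ca_ent_type == "show":
            by_obj_id[ca_ent_type][cust_attr["key"]] = cust_attr
        elif ca_ent_type == "task":
            by_obj_id[cust_attr["object_type_id"]][cust_attr["key"]] = cust_attr

    print(dict(by_obj_id))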
@@ -18,13 +18,16 @@ def _subprocess(*args, **kwargs):
    """Convenience method for getting output errors for subprocess."""

    # make sure environment contains only strings
-    filtered_env = {k: str(v) for k, v in os.environ.items()}
+    if not kwargs.get("env"):
+        filtered_env = {k: str(v) for k, v in os.environ.items()}
+    else:
+        filtered_env = {k: str(v) for k, v in kwargs.get("env").items()}

    # set overrides
    kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
    kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
    kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
-    kwargs['env'] = kwargs.get('env', filtered_env)
+    kwargs['env'] = filtered_env

    proc = subprocess.Popen(*args, **kwargs)
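The net effect of this hunk: the environment handed to Popen is always the sanitized mapping, and a caller-supplied env is now sanitized too instead of being passed through raw. A standalone sketch (not part of the commit) of the same idea:

    import os
    import subprocess

    def run(*args, **kwargs):
        # subprocess rejects non-string env values on some platforms,
        # so cast everything to str before Popen sees it
        env = kwargs.get("env") or os.environ
        kwargs["env"] = {k: str(v) for k, v in env.items()}
        kwargs.setdefault("stdout", subprocess.PIPE)
        kwargs.setdefault("stderr", subprocess.STDOUT)
        proc = subprocess.Popen(*args, **kwargs)
        out, _ = proc.communicate()
        return out

    # usage (assumes ffmpeg on PATH): run(["ffmpeg", "-version"])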
424  pype/nuke/lib.py
@@ -707,9 +707,11 @@ class WorkfileSettings(object):
         frame_start = int(data["frameStart"]) - handle_start
         frame_end = int(data["frameEnd"]) + handle_end

+        self._root_node["lock_range"].setValue(False)
         self._root_node["fps"].setValue(fps)
         self._root_node["first_frame"].setValue(frame_start)
         self._root_node["last_frame"].setValue(frame_end)
+        self._root_node["lock_range"].setValue(True)

         # setting active viewers
         try:
@@ -1197,13 +1199,13 @@ class BuildWorkfile(WorkfileSettings):
         self.ypos -= (self.ypos_size * multiply) + self.ypos_gap


-class Exporter_review_lut:
+class ExporterReview:
     """
-    Generator object for review lut from Nuke
+    Base class object for generating review data from Nuke

     Args:
         klass (pyblish.plugin): pyblish plugin parent
-
         instance (pyblish.instance): instance of pyblish context

     """
     _temp_nodes = []
@@ -1213,94 +1215,15 @@ class Exporter_review_lut:

     def __init__(self,
                  klass,
-                 instance,
-                 name=None,
-                 ext=None,
-                 cube_size=None,
-                 lut_size=None,
-                 lut_style=None):
+                 instance
+                 ):

         self.log = klass.log
         self.instance = instance
-
-        self.name = name or "baked_lut"
-        self.ext = ext or "cube"
-        self.cube_size = cube_size or 32
-        self.lut_size = lut_size or 1024
-        self.lut_style = lut_style or "linear"
-
-        self.stagingDir = self.instance.data["stagingDir"]
+        self.path_in = self.instance.data.get("path", None)
+        self.staging_dir = self.instance.data["stagingDir"]
         self.collection = self.instance.data.get("collection", None)
-
-        # set frame start / end and file name to self
-        self.get_file_info()
-
-        self.log.info("File info was set...")
-
-        self.file = self.fhead + self.name + ".{}".format(self.ext)
-        self.path = os.path.join(self.stagingDir, self.file).replace("\\", "/")
-
-    def generate_lut(self):
-        # ---------- start nodes creation
-
-        # CMSTestPattern
-        cms_node = nuke.createNode("CMSTestPattern")
-        cms_node["cube_size"].setValue(self.cube_size)
-        # connect
-        self._temp_nodes.append(cms_node)
-        self.previous_node = cms_node
-        self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
-
-        # Node View Process
-        ipn = self.get_view_process_node()
-        if ipn is not None:
-            # connect
-            ipn.setInput(0, self.previous_node)
-            self._temp_nodes.append(ipn)
-            self.previous_node = ipn
-            self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
-
-        # OCIODisplay
-        dag_node = nuke.createNode("OCIODisplay")
-        # connect
-        dag_node.setInput(0, self.previous_node)
-        self._temp_nodes.append(dag_node)
-        self.previous_node = dag_node
-        self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
-
-        # GenerateLUT
-        gen_lut_node = nuke.createNode("GenerateLUT")
-        gen_lut_node["file"].setValue(self.path)
-        gen_lut_node["file_type"].setValue(".{}".format(self.ext))
-        gen_lut_node["lut1d"].setValue(self.lut_size)
-        gen_lut_node["style1d"].setValue(self.lut_style)
-        # connect
-        gen_lut_node.setInput(0, self.previous_node)
-        self._temp_nodes.append(gen_lut_node)
-        self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
-
-        # ---------- end nodes creation
-
-        # Export lut file
-        nuke.execute(
-            gen_lut_node.name(),
-            int(self.first_frame),
-            int(self.first_frame))
-
-        self.log.info("Exported...")
-
-        # ---------- generate representation data
-        self.get_representation_data()
-
-        self.log.debug("Representation... `{}`".format(self.data))
-
-        # ---------- Clean up
-        for node in self._temp_nodes:
-            nuke.delete(node)
-        self.log.info("Deleted nodes...")
-
-        return self.data

     def get_file_info(self):
         if self.collection:
             self.log.debug("Collection: `{}`".format(self.collection))
@@ -1313,7 +1236,7 @@ class Exporter_review_lut:
             self.first_frame = min(self.collection.indexes)
             self.last_frame = max(self.collection.indexes)
         else:
-            self.fname = os.path.basename(self.instance.data.get("path", None))
+            self.fname = os.path.basename(self.path_in)
             self.fhead = os.path.splitext(self.fname)[0] + "."
             self.first_frame = self.instance.data.get("frameStart", None)
             self.last_frame = self.instance.data.get("frameEnd", None)
@@ -1321,17 +1244,26 @@ class Exporter_review_lut:
         if "#" in self.fhead:
             self.fhead = self.fhead.replace("#", "")[:-1]

-    def get_representation_data(self):
+    def get_representation_data(self, tags=None, range=False):
+        add_tags = []
+        if tags:
+            add_tags = tags
+
         repre = {
             'name': self.name,
             'ext': self.ext,
             'files': self.file,
-            "stagingDir": self.stagingDir,
+            "stagingDir": self.staging_dir,
             "anatomy_template": "publish",
-            "tags": [self.name.replace("_", "-")]
+            "tags": [self.name.replace("_", "-")] + add_tags
         }

+        if range:
+            repre.update({
+                "frameStart": self.first_frame,
+                "frameEnd": self.last_frame,
+            })
+
         self.data["representations"].append(repre)

     def get_view_process_node(self):
@@ -1366,6 +1298,251 @@ class Exporter_review_lut:

         return ipn

+    def clean_nodes(self):
+        for node in self._temp_nodes:
+            nuke.delete(node)
+        self.log.info("Deleted nodes...")
+
+
+class ExporterReviewLut(ExporterReview):
+    """
+    Generator object for review lut from Nuke
+
+    Args:
+        klass (pyblish.plugin): pyblish plugin parent
+        instance (pyblish.instance): instance of pyblish context
+
+    """
+    def __init__(self,
+                 klass,
+                 instance,
+                 name=None,
+                 ext=None,
+                 cube_size=None,
+                 lut_size=None,
+                 lut_style=None):
+        # initialize parent class
+        ExporterReview.__init__(self, klass, instance)
+
+        # deal with raw viewer lut if it is defined on the plugin
+        if hasattr(klass, "viewer_lut_raw"):
+            self.viewer_lut_raw = klass.viewer_lut_raw
+        else:
+            self.viewer_lut_raw = False
+
+        self.name = name or "baked_lut"
+        self.ext = ext or "cube"
+        self.cube_size = cube_size or 32
+        self.lut_size = lut_size or 1024
+        self.lut_style = lut_style or "linear"
+
+        # set frame start / end and file name to self
+        self.get_file_info()
+
+        self.log.info("File info was set...")
+
+        self.file = self.fhead + self.name + ".{}".format(self.ext)
+        self.path = os.path.join(
+            self.staging_dir, self.file).replace("\\", "/")
+
+    def generate_lut(self):
+        # ---------- start nodes creation
+
+        # CMSTestPattern
+        cms_node = nuke.createNode("CMSTestPattern")
+        cms_node["cube_size"].setValue(self.cube_size)
+        # connect
+        self._temp_nodes.append(cms_node)
+        self.previous_node = cms_node
+        self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
+
+        # Node View Process
+        ipn = self.get_view_process_node()
+        if ipn is not None:
+            # connect
+            ipn.setInput(0, self.previous_node)
+            self._temp_nodes.append(ipn)
+            self.previous_node = ipn
+            self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
+
+        if not self.viewer_lut_raw:
+            # OCIODisplay
+            dag_node = nuke.createNode("OCIODisplay")
+            # connect
+            dag_node.setInput(0, self.previous_node)
+            self._temp_nodes.append(dag_node)
+            self.previous_node = dag_node
+            self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
+
+        # GenerateLUT
+        gen_lut_node = nuke.createNode("GenerateLUT")
+        gen_lut_node["file"].setValue(self.path)
+        gen_lut_node["file_type"].setValue(".{}".format(self.ext))
+        gen_lut_node["lut1d"].setValue(self.lut_size)
+        gen_lut_node["style1d"].setValue(self.lut_style)
+        # connect
+        gen_lut_node.setInput(0, self.previous_node)
+        self._temp_nodes.append(gen_lut_node)
+        self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
+
+        # ---------- end nodes creation
+
+        # Export lut file
+        nuke.execute(
+            gen_lut_node.name(),
+            int(self.first_frame),
+            int(self.first_frame))
+
+        self.log.info("Exported...")
+
+        # ---------- generate representation data
+        self.get_representation_data()
+
+        self.log.debug("Representation... `{}`".format(self.data))
+
+        # ---------- Clean up
+        self.clean_nodes()
+
+        return self.data
+
+
+class ExporterReviewMov(ExporterReview):
+    """
+    Generator object for review mov files from Nuke
+
+    Args:
+        klass (pyblish.plugin): pyblish plugin parent
+        instance (pyblish.instance): instance of pyblish context
+
+    """
+    def __init__(self,
+                 klass,
+                 instance,
+                 name=None,
+                 ext=None,
+                 ):
+        # initialize parent class
+        ExporterReview.__init__(self, klass, instance)
+
+        # passing presets for nodes to self
+        if hasattr(klass, "nodes"):
+            self.nodes = klass.nodes
+        else:
+            self.nodes = {}
+
+        # deal with raw viewer lut if it is defined on the plugin
+        if hasattr(klass, "viewer_lut_raw"):
+            self.viewer_lut_raw = klass.viewer_lut_raw
+        else:
+            self.viewer_lut_raw = False
+
+        self.name = name or "baked"
+        self.ext = ext or "mov"
+
+        # set frame start / end and file name to self
+        self.get_file_info()
+
+        self.log.info("File info was set...")
+
+        self.file = self.fhead + self.name + ".{}".format(self.ext)
+        self.path = os.path.join(
+            self.staging_dir, self.file).replace("\\", "/")
+
+    def render(self, render_node_name):
+        self.log.info("Rendering...")
+        # Render Write node
+        nuke.execute(
+            render_node_name,
+            int(self.first_frame),
+            int(self.last_frame))
+
+        self.log.info("Rendered...")
+
+    def save_file(self):
+        with anlib.maintained_selection():
+            self.log.info("Saving nodes as file...")
+            # select temp nodes
+            anlib.select_nodes(self._temp_nodes)
+            # create nk path
+            path = os.path.splitext(self.path)[0] + ".nk"
+            # save file to the path
+            nuke.nodeCopy(path)
+
+        self.log.info("Nodes exported...")
+        return path
+
+    def generate_mov(self, farm=False):
+        # ---------- start nodes creation
+
+        # Read node
+        r_node = nuke.createNode("Read")
+        r_node["file"].setValue(self.path_in)
+        r_node["first"].setValue(self.first_frame)
+        r_node["origfirst"].setValue(self.first_frame)
+        r_node["last"].setValue(self.last_frame)
+        r_node["origlast"].setValue(self.last_frame)
+        # connect
+        self._temp_nodes.append(r_node)
+        self.previous_node = r_node
+        self.log.debug("Read... `{}`".format(self._temp_nodes))
+
+        # View Process node
+        ipn = self.get_view_process_node()
+        if ipn is not None:
+            # connect
+            ipn.setInput(0, self.previous_node)
+            self._temp_nodes.append(ipn)
+            self.previous_node = ipn
+            self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
+
+        if not self.viewer_lut_raw:
+            # OCIODisplay node
+            dag_node = nuke.createNode("OCIODisplay")
+            # connect
+            dag_node.setInput(0, self.previous_node)
+            self._temp_nodes.append(dag_node)
+            self.previous_node = dag_node
+            self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
+
+        # Write node
+        write_node = nuke.createNode("Write")
+        self.log.debug("Path: {}".format(self.path))
+        self.instance.data["baked_colorspace_movie"] = self.path
+        write_node["file"].setValue(self.path)
+        write_node["file_type"].setValue(self.ext)
+        write_node["raw"].setValue(1)
+        # connect
+        write_node.setInput(0, self.previous_node)
+        self._temp_nodes.append(write_node)
+        self.log.debug("Write... `{}`".format(self._temp_nodes))
+
+        # ---------- end nodes creation
+
+        # ---------- render or save to nk
+        if farm:
+            path_nk = self.save_file()
+            self.data.update({
+                "bakeScriptPath": path_nk,
+                "bakeWriteNodeName": write_node.name(),
+                "bakeRenderPath": self.path
+            })
+        else:
+            self.render(write_node.name())
+            # ---------- generate representation data
+            self.get_representation_data(
+                tags=["review", "delete"],
+                range=True
+            )
+
+        self.log.debug("Representation... `{}`".format(self.data))
+
+        # ---------- Clean up
+        self.clean_nodes()
+
+        return self.data
+
+
 def get_dependent_nodes(nodes):
     """Get all dependent nodes connected to the list of nodes.
@@ -1401,3 +1578,70 @@ def get_dependent_nodes(nodes):
         })

     return connections_in, connections_out
+
+
+def find_free_space_to_paste_nodes(
+        nodes,
+        group=nuke.root(),
+        direction="right",
+        offset=300):
+    """
+    Get coordinates in DAG (node graph) for placing new nodes
+
+    Arguments:
+        nodes (list): list of nuke.Node objects
+        group (nuke.Node) [optional]: the group in whose context to work
+        direction (str) [optional]: where the nodes should be placed
+                                    [left, right, top, bottom]
+        offset (int) [optional]: offset from the rest of the nodes
+
+    Returns:
+        xpos (int): x coordinate in DAG
+        ypos (int): y coordinate in DAG
+    """
+    if len(nodes) == 0:
+        return 0, 0
+
+    group_xpos = list()
+    group_ypos = list()
+
+    # get local coordinates of all nodes
+    nodes_xpos = [n.xpos() for n in nodes] + \
+                 [n.xpos() + n.screenWidth() for n in nodes]
+
+    nodes_ypos = [n.ypos() for n in nodes] + \
+                 [n.ypos() + n.screenHeight() for n in nodes]
+
+    # get complete screen size of all nodes to be placed in
+    nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
+    nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
+
+    # get screen size (r, l, t, b) of all nodes in `group`
+    with group:
+        group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
+                     [n.xpos() + n.screenWidth() for n in nuke.allNodes()
+                      if n not in nodes]
+        group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
+                     [n.ypos() + n.screenHeight() for n in nuke.allNodes()
+                      if n not in nodes]
+
+    # calc output left
+    if direction in "left":
+        xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output right
+    if direction in "right":
+        xpos = max(group_xpos) + abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output top
+    if direction in "top":
+        xpos = min(group_xpos)
+        ypos = min(group_ypos) - abs(nodes_screen_height) - abs(offset)
+        return xpos, ypos
+    # calc output bottom
+    if direction in "bottom":
+        xpos = min(group_xpos)
+        ypos = max(group_ypos) + abs(offset)
+        return xpos, ypos
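The placement logic added above boils down to bounding-box arithmetic. A pure-Python sketch (not part of the commit) with illustrative coordinates:

    def place(nodes_box, group_box, direction="right", offset=300):
        nx0, ny0, nx1, ny1 = nodes_box   # bounds of the pasted nodes
        gx0, gy0, gx1, gy1 = group_box   # bounds of the existing graph
        width, height = nx1 - nx0, ny1 - ny0
        if direction == "left":
            return gx0 - width - offset, gy0
        if direction == "right":
            return gx1 + offset, gy0
        if direction == "top":
            return gx0, gy0 - height - offset
        return gx0, gy1 + offset         # bottom

    print(place((0, 0, 400, 200), (1000, 50, 1600, 900)))  # (1900, 50)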
@@ -22,19 +22,16 @@ def has_unsaved_changes():


 def save_file(filepath):
+    file = os.path.basename(filepath)
     project = hiero.core.projects()[-1]

-    # close `Untitled` project
-    if "Untitled" not in project.name():
-        log.info("Saving project: `{}`".format(project.name()))
+    if project:
+        log.info("Saving project: `{}` as '{}'".format(project.name(), file))
         project.saveAs(filepath)
-    elif not project:
+    else:
         log.info("Creating new project...")
         project = hiero.core.newProject()
         project.saveAs(filepath)
-    else:
-        log.info("Dropping `Untitled` project...")
-        return


 def open_file(filepath):
@@ -188,14 +188,18 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
         # Adding Custom Attributes
         for attr, val in assetversion_cust_attrs.items():
             if attr in assetversion_entity["custom_attributes"]:
-                assetversion_entity["custom_attributes"][attr] = val
-                continue
+                try:
+                    assetversion_entity["custom_attributes"][attr] = val
+                    session.commit()
+                    continue
+                except Exception:
+                    session.rollback()

             self.log.warning((
                 "Custom Attribute \"{0}\""
-                " is not available for AssetVersion."
-                " Can't set it's value to: \"{1}\""
-            ).format(attr, str(val)))
+                " is not available for AssetVersion <{1}>."
+                " Can't set its value to: \"{2}\""
+            ).format(attr, assetversion_entity["id"], str(val)))

         # Have to commit the version and asset, because location can't
         # determine the final location without it.
@@ -54,10 +54,6 @@ def collect(root,
                          patterns=[pattern],
                          minimum_items=1)

-    # Ignore any remainders
-    if remainder:
-        print("Skipping remainder {}".format(remainder))
-
     # Exclude any frames outside start and end frame.
     for collection in collections:
         for index in list(collection.indexes):
@@ -71,7 +67,7 @@ def collect(root,
     # Keep only collections that have at least a single frame
     collections = [c for c in collections if c.indexes]

-    return collections
+    return collections, remainder


 class CollectRenderedFrames(pyblish.api.ContextPlugin):
@@ -119,8 +115,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
             try:
                 data = json.load(f)
             except Exception as exc:
-                self.log.error("Error loading json: "
-                               "{} - Exception: {}".format(path, exc))
+                self.log.error(
+                    "Error loading json: "
+                    "{} - Exception: {}".format(path, exc)
+                )
                 raise

         cwd = os.path.dirname(path)
@@ -148,11 +146,14 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
         os.environ.update(session)
         instance = metadata.get("instance")
         if instance:
+            # here is the place to add ability for nuke noninteractive
+            # ______________________________________
+            instance_family = instance.get("family")
             pixel_aspect = instance.get("pixelAspect", 1)
             resolution_width = instance.get("resolutionWidth", 1920)
             resolution_height = instance.get("resolutionHeight", 1080)
+            lut_path = instance.get("lutPath", None)

         else:
             # Search in directory
             data = dict()
@@ -163,14 +164,17 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
         if regex:
             self.log.info("Using regex: {}".format(regex))

-        collections = collect(root=root,
-                              regex=regex,
-                              exclude_regex=data.get("exclude_regex"),
-                              frame_start=data.get("frameStart"),
-                              frame_end=data.get("frameEnd"))
+        collections, remainder = collect(
+            root=root,
+            regex=regex,
+            exclude_regex=data.get("exclude_regex"),
+            frame_start=data.get("frameStart"),
+            frame_end=data.get("frameEnd"),
+        )

         self.log.info("Found collections: {}".format(collections))

         """
         if data.get("subset"):
             # If subset is provided for this json then it must be a single
             # collection.
@@ -178,79 +182,190 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
                 self.log.error("Forced subset can only work with a single "
                                "found sequence")
                 raise RuntimeError("Invalid sequence")
         """

         fps = data.get("fps", 25)

+        if data.get("user"):
+            context.data["user"] = data["user"]
+
         # Get family from the data
         families = data.get("families", ["render"])
         if "render" not in families:
             families.append("render")
         if "ftrack" not in families:
             families.append("ftrack")
+        if "review" not in families:
+            families.append("review")
+        if "write" in instance_family:
+            families.append("write")

-        for collection in collections:
-            instance = context.create_instance(str(collection))
-            self.log.info("Collection: %s" % list(collection))
-
-            # Ensure each instance gets a unique reference to the data
-            data = copy.deepcopy(data)
-
-            # If no subset provided, get it from collection's head
-            subset = data.get("subset", collection.head.rstrip("_. "))
-
-            # If no start or end frame provided, get it from collection
-            indices = list(collection.indexes)
-            start = data.get("frameStart", indices[0])
-            end = data.get("frameEnd", indices[-1])
-
-            self.log.debug("Collected pixel_aspect:\n"
-                           "{}".format(pixel_aspect))
-            self.log.debug("type pixel_aspect:\n"
-                           "{}".format(type(pixel_aspect)))
-
-            # root = os.path.normpath(root)
-            # self.log.info("Source: {}}".format(data.get("source", "")))
-
-            ext = list(collection)[0].split('.')[-1]
-
-            instance.data.update({
-                "name": str(collection),
-                "family": families[0],  # backwards compatibility / pyblish
-                "families": list(families),
-                "subset": subset,
-                "asset": data.get("asset", api.Session["AVALON_ASSET"]),
-                "stagingDir": root,
-                "frameStart": start,
-                "frameEnd": end,
-                "fps": fps,
-                "source": data.get('source', ''),
-                "pixelAspect": pixel_aspect,
-            })
-            if lut_path:
-                instance.data.update({"lutPath": lut_path})
-            instance.append(collection)
-            instance.context.data['fps'] = fps
-
-            if "representations" not in instance.data:
-                instance.data["representations"] = []
-
-            representation = {
-                'name': ext,
-                'ext': '{}'.format(ext),
-                'files': list(collection),
-                "stagingDir": root,
-                "anatomy_template": "render",
-                "fps": fps,
-                "tags": ['review']
-            }
-            instance.data["representations"].append(representation)
-
-        if data.get('user'):
-            context.data["user"] = data['user']
-
-        self.log.debug("Collected instance:\n"
-                       "{}".format(pformat(instance.data)))
+        if data.get("attachTo"):
+            # we need to attach found collections to existing
+            # subset versions as review representations
+
+            for attach in data.get("attachTo"):
+                self.log.info(
+                    "Attaching render {}:v{}".format(
+                        attach["subset"], attach["version"]))
+                instance = context.create_instance(
+                    attach["subset"])
+                instance.data.update(
+                    {
+                        "name": attach["subset"],
+                        "version": attach["version"],
+                        "family": 'review',
+                        "families": ['review', 'ftrack'],
+                        "asset": data.get(
+                            "asset", api.Session["AVALON_ASSET"]),
+                        "stagingDir": root,
+                        "frameStart": data.get("frameStart"),
+                        "frameEnd": data.get("frameEnd"),
+                        "fps": fps,
+                        "source": data.get("source", ""),
+                        "pixelAspect": pixel_aspect
+                    })
+
+                if "representations" not in instance.data:
+                    instance.data["representations"] = []
+
+                for collection in collections:
+                    self.log.info(
+                        " - adding representation: {}".format(
+                            str(collection))
+                    )
+                    ext = collection.tail.lstrip(".")
+
+                    representation = {
+                        "name": ext,
+                        "ext": "{}".format(ext),
+                        "files": list(collection),
+                        "stagingDir": root,
+                        "anatomy_template": "render",
+                        "fps": fps,
+                        "tags": ["review"],
+                    }
+                    instance.data["representations"].append(
+                        representation)
+
+        elif data.get("subset"):
+            # if we have a subset - add all collections and known
+            # remainders as representations
+
+            self.log.info(
+                "Adding representations to subset {}".format(
+                    data.get("subset")))
+
+            instance = context.create_instance(data.get("subset"))
+            instance.data.update(
+                {
+                    "name": data.get("subset"),
+                    "family": families[0],
+                    "families": list(families),
+                    "subset": data.get("subset"),
+                    "asset": data.get(
+                        "asset", api.Session["AVALON_ASSET"]),
+                    "stagingDir": root,
+                    "frameStart": data.get("frameStart"),
+                    "frameEnd": data.get("frameEnd"),
+                    "fps": fps,
+                    "source": data.get("source", ""),
+                    "pixelAspect": pixel_aspect,
+                }
+            )
+
+            if "representations" not in instance.data:
+                instance.data["representations"] = []
+
+            for collection in collections:
+                self.log.info(" - {}".format(str(collection)))
+
+                ext = collection.tail.lstrip(".")
+
+                representation = {
+                    "name": ext,
+                    "ext": "{}".format(ext),
+                    "files": list(collection),
+                    "stagingDir": root,
+                    "anatomy_template": "render",
+                    "fps": fps,
+                    "tags": ["review"],
+                }
+                instance.data["representations"].append(
+                    representation)
+
+            # process remainders
+            for rem in remainder:
+                # add only known types to representation
+                if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
+                    self.log.info(" . {}".format(rem))
+                    representation = {
+                        "name": rem.split(".")[-1],
+                        "ext": "{}".format(rem.split(".")[-1]),
+                        "files": rem,
+                        "stagingDir": root,
+                        "anatomy_template": "render",
+                        "fps": fps,
+                        "tags": ["review"],
+                    }
+                    instance.data["representations"].append(
+                        representation)
+
+        else:
+            # we have no subset so we take every collection and create one
+            # from it
+            for collection in collections:
+                instance = context.create_instance(str(collection))
+                self.log.info("Creating subset from: %s" % str(collection))
+
+                # Ensure each instance gets a unique reference to the data
+                data = copy.deepcopy(data)
+
+                # If no subset provided, get it from collection's head
+                subset = data.get("subset", collection.head.rstrip("_. "))
+
+                # If no start or end frame provided, get it from collection
+                indices = list(collection.indexes)
+                start = data.get("frameStart", indices[0])
+                end = data.get("frameEnd", indices[-1])
+
+                ext = list(collection)[0].split(".")[-1]
+
+                if "review" not in families:
+                    families.append("review")
+
+                instance.data.update(
+                    {
+                        "name": str(collection),
+                        "family": families[0],  # backwards compatibility
+                        "families": list(families),
+                        "subset": subset,
+                        "asset": data.get(
+                            "asset", api.Session["AVALON_ASSET"]),
+                        "stagingDir": root,
+                        "frameStart": start,
+                        "frameEnd": end,
+                        "fps": fps,
+                        "source": data.get("source", ""),
+                        "pixelAspect": pixel_aspect,
+                    }
+                )
+                if lut_path:
+                    instance.data.update({"lutPath": lut_path})
+
+                instance.append(collection)
+                instance.context.data["fps"] = fps
+
+                if "representations" not in instance.data:
+                    instance.data["representations"] = []
+
+                representation = {
+                    "name": ext,
+                    "ext": "{}".format(ext),
+                    "files": list(collection),
+                    "stagingDir": root,
+                    "anatomy_template": "render",
+                    "fps": fps,
+                    "tags": ["review"],
+                }
+                instance.data["representations"].append(representation)
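A small sketch (not part of the commit) of the new collect() contract used above: callers now receive the remainder (files that belong to no frame sequence) alongside the sequences, which is what lets single files such as .mov previews be attached as representations. File names are illustrative and the printed reprs are approximate.

    import clique

    files = ["beauty.0001.exr", "beauty.0002.exr", "preview.mov"]
    collections, remainder = clique.assemble(files, minimum_items=1)
    print(collections)  # roughly: [<Collection "beauty.%04d.exr [1-2]">]
    print(remainder)    # ['preview.mov']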
@@ -76,7 +76,18 @@ class CollectTemplates(pyblish.api.InstancePlugin):
             "subset": subset_name,
             "version": version_number,
             "hierarchy": hierarchy.replace("\\", "/"),
             "representation": "TEMP"}

+        resolution_width = instance.data.get("resolutionWidth")
+        resolution_height = instance.data.get("resolutionHeight")
+        fps = instance.data.get("fps")
+
+        if resolution_width:
+            template_data["resolution_width"] = resolution_width
+        if resolution_height:
+            template_data["resolution_height"] = resolution_height
+        if fps:
+            template_data["fps"] = fps
+
         instance.data["template"] = template
         instance.data["assumedTemplateData"] = template_data
@@ -1,5 +1,4 @@
 import os
-import math
 import pyblish.api
 import clique
 import pype.api
@@ -25,14 +24,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
     ext_filter = []

     def process(self, instance):
+        to_width = 1920
+        to_height = 1080
+
         output_profiles = self.outputs or {}

         inst_data = instance.data
         fps = inst_data.get("fps")
         start_frame = inst_data.get("frameStart")
-        resolution_height = instance.data.get("resolutionHeight", 1080)
-        resolution_width = instance.data.get("resolutionWidth", 1920)
+        resolution_width = instance.data.get("resolutionWidth", to_width)
+        resolution_height = instance.data.get("resolutionHeight", to_height)
         pixel_aspect = instance.data.get("pixelAspect", 1)
         self.log.debug("Families In: `{}`".format(instance.data["families"]))
@@ -155,13 +156,38 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 # preset's output data
                 output_args.extend(profile.get('output', []))

+                # defining image ratios
+                resolution_ratio = float(resolution_width) / (
+                    resolution_height * pixel_aspect)
+                delivery_ratio = float(to_width) / float(to_height)
+                self.log.debug(resolution_ratio)
+                self.log.debug(delivery_ratio)
+
+                # get scale factor
+                scale_factor = float(to_height) / (
+                    resolution_height * pixel_aspect)
+                self.log.debug(scale_factor)
+
                 # letter_box
                 lb = profile.get('letter_box', 0)
-                if lb is not 0:
+                if lb != 0:
+                    ffmpeg_width = to_width
+                    ffmpeg_height = to_height
+                    if "reformat" not in p_tags:
+                        lb /= pixel_aspect
+                        if resolution_ratio != delivery_ratio:
+                            ffmpeg_width = resolution_width
+                            ffmpeg_height = int(
+                                resolution_height * pixel_aspect)
+                    else:
+                        # TODO: it might still be failing in some cases
+                        if resolution_ratio != delivery_ratio:
+                            lb /= scale_factor
+                        else:
+                            lb /= pixel_aspect
+
                     output_args.append(
-                        "-filter:v scale=1920x1080:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
+                        "-filter:v scale={0}x{1}:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{2})))/2):iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black".format(ffmpeg_width, ffmpeg_height, lb))

                 # In case audio is longer than video.
                 output_args.append("-shortest")
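The drawbox expression above encodes simple letterbox arithmetic: for a target aspect ratio lb, each black bar is (ih - iw/lb) / 2 pixels tall. A small sketch (not part of the commit) with illustrative numbers:

    # 1920x1080 frame letterboxed to 2.35:1
    width, height, lb = 1920, 1080, 2.35
    bar = round((height - (width * (1.0 / lb))) / 2)
    print(bar)  # 131 -> a 131 px bar at the top and at the bottom

    filter_arg = (
        "-filter:v scale={w}x{h}:flags=lanczos,setsar=1,"
        "drawbox=0:0:iw:round((ih-(iw*(1/{lb})))/2):t=fill:c=black,"
        "drawbox=0:ih-round((ih-(iw*(1/{lb})))/2):iw:"
        "round((ih-(iw*(1/{lb})))/2):t=fill:c=black"
    ).format(w=width, h=height, lb=lb)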
@@ -172,22 +198,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 self.log.debug("__ pixel_aspect: `{}`".format(pixel_aspect))
                 self.log.debug("__ resolution_width: `{}`".format(resolution_width))
                 self.log.debug("__ resolution_height: `{}`".format(resolution_height))

                 # scaling non-square pixels and 1920 width
                 if "reformat" in p_tags:
-                    width_scale = 1920
-                    width_half_pad = 0
-                    res_w = int(float(resolution_width) * pixel_aspect)
-                    height_half_pad = int((
-                        (res_w - 1920) / (
-                            res_w * .01) * (
-                                1080 * .01)) / 2
-                    )
-                    height_scale = 1080 - (height_half_pad * 2)
-                    if height_scale > 1080:
-                        height_scale = 1080
-                        width_half_pad = (1920 - (float(resolution_width) * (1080 / float(resolution_height)))) / 2
-                        width_scale = int(1920 - (width_half_pad * 2))
+                    if resolution_ratio < delivery_ratio:
+                        self.log.debug("lower than delivery")
+                        width_scale = int(to_width * scale_factor)
+                        width_half_pad = int(
+                            (to_width - width_scale) / 2)
+                        height_scale = to_height
+                        height_half_pad = 0
+                    else:
+                        self.log.debug("higher than delivery")
+                        width_scale = to_width
+                        width_half_pad = 0
+                        scale_factor = float(to_width) / float(resolution_width)
+                        self.log.debug(scale_factor)
+                        height_scale = int(
+                            resolution_height * scale_factor)
+                        height_half_pad = int(
+                            (to_height - height_scale) / 2)

                 self.log.debug("__ width_scale: `{}`".format(width_scale))
                 self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
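The reformat branch above scales the source into the delivery frame and splits the leftover space into equal pads. A generic sketch of that fit-and-pad arithmetic (not part of the commit; the committed branch also special-cases pixel aspect):

    to_w, to_h = 1920, 1080
    src_w, src_h, pixel_aspect = 1828, 1332, 1.0  # illustrative source

    scale = min(float(to_w) / (src_w * pixel_aspect), float(to_h) / src_h)
    w = int(src_w * pixel_aspect * scale)
    h = int(src_h * scale)
    pad_x = (to_w - w) // 2
    pad_y = (to_h - h) // 2

    scaling_arg = (
        "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1"
    ).format(w, h, to_w, to_h, pad_x, pad_y)
    print(scaling_arg)  # scale=1482x1080:...,pad=1920:1080:219:0:black,setsar=1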
@@ -195,8 +225,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))


-                scaling_arg = "scale={0}x{1}:flags=lanczos,pad=1920:1080:{2}:{3}:black,setsar=1".format(
-                    width_scale, height_scale, width_half_pad, height_half_pad
+                scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
+                    width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
                 )

                 vf_back = self.add_video_filter_args(
@@ -249,7 +279,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
                     'files': repr_file,
                     "tags": new_tags,
                     "outputName": name,
-                    "codec": codec_args
+                    "codec": codec_args,
+                    "resolutionWidth": resolution_width,
+                    "resolutionHeight": resolution_height
                 })
                 if repre_new.get('preview'):
                     repre_new.pop("preview")
@@ -7,6 +7,7 @@ import errno
 import pyblish.api
 from avalon import api, io
 from avalon.vendor import filelink
+from pathlib2 import Path
 # this is needed until speedcopy for linux is fixed
 if sys.platform == "win32":
     from speedcopy import copyfile
@@ -269,6 +270,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             "version": int(version["name"]),
             "hierarchy": hierarchy}

+        resolution_width = repre.get("resolutionWidth")
+        resolution_height = repre.get("resolutionHeight")
+        fps = instance.data.get("fps")
+
+        if resolution_width:
+            template_data["resolution_width"] = resolution_width
+        if resolution_height:
+            template_data["resolution_height"] = resolution_height
+        if fps:
+            template_data["fps"] = fps
+
         files = repre['files']
         if repre.get('stagingDir'):
             stagingdir = repre['stagingDir']
@@ -468,8 +480,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         Returns:
             None
         """
-        src = os.path.normpath(src)
-        dst = os.path.normpath(dst)
+        src = str(Path(src).resolve())
+        drive, _path = os.path.splitdrive(dst)
+        unc = Path(drive).resolve()
+        dst = str(unc / _path)

         self.log.debug("Copying file .. {} -> {}".format(src, dst))
         dirname = os.path.dirname(dst)
@@ -490,6 +505,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

     def hardlink_file(self, src, dst):
         dirname = os.path.dirname(dst)
+        src = Path(src).resolve()
+        drive, _path = os.path.splitdrive(dst)
+        unc = Path(drive).resolve()
+        dst = str(unc / _path)
         try:
             os.makedirs(dirname)
         except OSError as e:
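A sketch (not part of the commit) of the drive-to-UNC resolution added in copy_file and hardlink_file above: the destination's drive letter is resolved to its network (UNC) form so every machine sees the same path. The mapped drive and server names are illustrative; joining a rooted right-hand part onto a UNC Path keeps the share as the anchor.

    import os
    from pathlib2 import Path  # stdlib pathlib on Python 3

    dst = "P:\\projects\\show\\file.exr"     # mapped drive path (Windows)
    drive, _path = os.path.splitdrive(dst)   # "P:", "\\projects\\show\\file.exr"
    unc = Path(drive).resolve()              # e.g. Path("//fileserver/proj")
    dst = str(unc / _path)                   # -> \\fileserver\proj\projects\show\file.exr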
@@ -21,6 +21,12 @@ def _get_script():
     if module_path.endswith(".pyc"):
         module_path = module_path[:-len(".pyc")] + ".py"

+    module_path = os.path.normpath(module_path)
+    mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
+    network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
+
+    module_path = module_path.replace(mount_root, network_root)
+
     return module_path
@@ -164,6 +170,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         output_dir = instance.data["outputDir"]
         metadata_path = os.path.join(output_dir, metadata_filename)

+        metadata_path = os.path.normpath(metadata_path)
+        mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
+        network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH'])
+
+        metadata_path = metadata_path.replace(mount_root, network_root)
+
         # Generate the payload for Deadline submission
         payload = {
             "JobInfo": {
@@ -282,6 +294,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         relative_path = os.path.relpath(source, api.registered_root())
         source = os.path.join("{root}", relative_path).replace("\\", "/")

+        # find subsets and versions to attach the render to
+        attach_to = instance.data.get("attachTo")
+        attach_subset_versions = []
+        if attach_to:
+            for subset in attach_to:
+                for instance in context:
+                    if instance.data["subset"] != subset["subset"]:
+                        continue
+                    attach_subset_versions.append(
+                        {"version": instance.data["version"],
+                         "subset": subset["subset"],
+                         "family": subset["family"]})
+
         # Write metadata for publish job
         metadata = {
             "asset": asset,
@@ -6,9 +6,6 @@ from pype import api as pype
 import nuke


-log = pype.Logger().get_logger(__name__, "nuke")
-
-
 class CrateRead(avalon.nuke.Creator):
     # change this to template preset
     name = "ReadCopy"
@@ -7,10 +7,6 @@ from pypeapp import config

 import nuke


-log = pype.Logger().get_logger(__name__, "nuke")
-
-
 class CreateWriteRender(plugin.PypeCreator):
     # change this to template preset
     name = "WriteRender"
319
pype/plugins/nuke/load/load_backdrop.py
Normal file
@ -0,0 +1,319 @@
from avalon import api, style, io
import nuke
import nukescripts
from pype.nuke import lib as pnlib
from avalon.nuke import lib as anlib
from avalon.nuke import containerise, update_container
reload(pnlib)


class LoadBackdropNodes(api.Loader):
    """Loading Published Backdrop nodes (workfile, nukenodes)"""

    representations = ["nk"]
    families = ["workfile", "nukenodes"]

    label = "Import Nuke Nodes"
    order = 0
    icon = "eye"
    color = style.colors.light
    node_color = "0x7533c1ff"
    def load(self, context, name, namespace, data):
        """
        Loading function to import a .nk file into the script and wrap
        it in a backdrop

        Arguments:
            context (dict): context of version
            name (str): name of the version
            namespace (str): asset name
            data (dict): compulsory attribute > not used

        Returns:
            nuke node: containerised nuke node object
        """

        # get main variables
        version = context['version']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        namespace = namespace or context['asset']['name']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        # prepare data for imprinting
        # add additional metadata from the version to imprint to the Avalon knob
        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # getting file path
        file = self.fname.replace("\\", "/")

        # adding nodes to the node graph
        # just in case we are in a group, jump out of it
        nuke.endGroup()

        # Get mouse position
        n = nuke.createNode("NoOp")
        xcursor, ycursor = (n.xpos(), n.ypos())
        anlib.reset_selection()
        nuke.delete(n)

        bdn_frame = 50

        with anlib.maintained_selection():

            # add group from nk
            nuke.nodePaste(file)

            # get all pasted nodes
            new_nodes = list()
            nodes = nuke.selectedNodes()

            # get pointer position in DAG
            xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(
                nodes, direction="right", offset=200 + bdn_frame)

            # reset position of all nodes and replace inputs and output
            for n in nodes:
                anlib.reset_selection()
                xpos = (n.xpos() - xcursor) + xpointer
                ypos = (n.ypos() - ycursor) + ypointer
                n.setXYpos(xpos, ypos)

                # replace Input nodes with dots
                if n.Class() == "Input":
                    dot = nuke.createNode("Dot")
                    new_name = n.name().replace("INP", "DOT")
                    dot.setName(new_name)
                    dot["label"].setValue(new_name)
                    dot.setXYpos(xpos, ypos)
                    new_nodes.append(dot)

                    # rewire
                    dep = n.dependent()
                    for d in dep:
                        index = next((i for i, dpcy in enumerate(
                            d.dependencies())
                            if n is dpcy), 0)
                        d.setInput(index, dot)

                    # remove the Input node
                    anlib.reset_selection()
                    nuke.delete(n)
                    continue

                # replace the Output node with a dot
                elif n.Class() == "Output":
                    dot = nuke.createNode("Dot")
                    new_name = n.name() + "_DOT"
                    dot.setName(new_name)
                    dot["label"].setValue(new_name)
                    dot.setXYpos(xpos, ypos)
                    new_nodes.append(dot)

                    # rewire
                    dep = next((d for d in n.dependencies()), None)
                    if dep:
                        dot.setInput(0, dep)

                    # remove the Output node
                    anlib.reset_selection()
                    nuke.delete(n)
                    continue
                else:
                    new_nodes.append(n)

            # reselect nodes with new Dots instead of Inputs and Output
            anlib.reset_selection()
            anlib.select_nodes(new_nodes)
            # place on backdrop
            bdn = nukescripts.autoBackdrop()

            # add frame offset
            xpos = bdn.xpos() - bdn_frame
            ypos = bdn.ypos() - bdn_frame
            bdwidth = bdn["bdwidth"].value() + (bdn_frame * 2)
            bdheight = bdn["bdheight"].value() + (bdn_frame * 2)

            bdn["xpos"].setValue(xpos)
            bdn["ypos"].setValue(ypos)
            bdn["bdwidth"].setValue(bdwidth)
            bdn["bdheight"].setValue(bdheight)

            bdn["name"].setValue(object_name)
            bdn["label"].setValue(
                "Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE "
                "\nANYTHING FROM THIS FRAME!".format(object_name))
            bdn["note_font_size"].setValue(20)

            return containerise(
                node=bdn,
                name=name,
                namespace=namespace,
                context=context,
                loader=self.__class__.__name__,
                data=data_imprint)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        # get main variables
        # Get version from io
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        # get corresponding node
        GN = nuke.toNode(container['objectName'])

        file = api.get_representation_path(representation).replace("\\", "/")
        context = representation["context"]
        name = container['name']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        namespace = container['namespace']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"representation": str(representation["_id"]),
                        "frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # adding nodes to the node graph
        # just in case we are in a group, jump out of it
        nuke.endGroup()

        with anlib.maintained_selection():
            xpos = GN.xpos()
            ypos = GN.ypos()
            avalon_data = anlib.get_avalon_knob_data(GN)
            nuke.delete(GN)
            # add group from nk
            nuke.nodePaste(file)

            GN = nuke.selectedNode()
            anlib.set_avalon_knob_data(GN, avalon_data)
            GN.setXYpos(xpos, ypos)
            GN["name"].setValue(object_name)

        # get all versions in a list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # change color of the node if it is not the latest version
        if version.get("name") not in [max_version]:
            GN["tile_color"].setValue(int("0xd88467ff", 16))
        else:
            GN["tile_color"].setValue(int(self.node_color, 16))

        self.log.info("updated to version: {}".format(version.get("name")))

        return update_container(GN, data_imprint)

    def connect_active_viewer(self, group_node):
        """
        Finds the active viewer and places the node under it;
        also adds the name of the group into the Input Process
        of the viewer

        Arguments:
            group_node (nuke node): nuke group node object

        """
        group_node_name = group_node["name"].value()

        viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
        if len(viewer) > 0:
            viewer = viewer[0]
        else:
            self.log.error("Please create a Viewer node before you "
                           "run this action again")
            return None

        # get coordinates of Viewer1
        xpos = viewer["xpos"].value()
        ypos = viewer["ypos"].value()

        ypos += 150

        viewer["ypos"].setValue(ypos)

        # set coordinates on the group node
        group_node["xpos"].setValue(xpos)
        group_node["ypos"].setValue(ypos + 50)

        # add group node name to the Viewer Input Process
        viewer["input_process_node"].setValue(group_node_name)

        # put a backdrop under it
        pnlib.create_backdrop(label="Input Process", layer=2,
                              nodes=[viewer, group_node], color="0x7c7faaff")

        return True

    def get_item(self, data, trackIndex, subTrackIndex):
        return {key: val for key, val in data.items()
                if subTrackIndex == val["subTrackIndex"]
                if trackIndex == val["trackIndex"]}

    def byteify(self, input):
        """
        Converts unicode strings to byte strings
        It recursively walks through the whole dictionary

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys

        """

        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.iteritems()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input

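
A quick usage sketch (note this is Python 2 code: dict.iteritems and the unicode type no longer exist in Python 3; here loader stands in for an instance of this loader class and the payload values are hypothetical):

    payload = {u"name": u"shot010", u"tags": [u"review"]}
    clean = loader.byteify(payload)
    # clean == {'name': 'shot010', 'tags': ['review']}, byte strings throughout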
    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from avalon.nuke import viewer_update_and_undo_stop
        node = nuke.toNode(container['objectName'])
        with viewer_update_and_undo_stop():
            nuke.delete(node)

@ -6,7 +6,7 @@ import pype
reload(pnlib)


-class ExtractReviewLutData(pype.api.Extractor):
+class ExtractReviewDataLut(pype.api.Extractor):
    """Extracts movie and thumbnail with baked in luts

    must be run after extract_render_local.py
@ -37,8 +37,9 @@ class ExtractReviewLutData(pype.api.Extractor):
        self.log.info(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

+        # generate data
        with anlib.maintained_selection():
-            exporter = pnlib.Exporter_review_lut(
+            exporter = pnlib.ExporterReviewLut(
                self, instance
            )
            data = exporter.generate_lut()

56
pype/plugins/nuke/publish/extract_review_data_mov.py
Normal file
@ -0,0 +1,56 @@
import os
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import pype
reload(pnlib)


class ExtractReviewDataMov(pype.api.Extractor):
    """Extracts movie and thumbnail with baked in luts

    must be run after extract_render_local.py

    """

    order = pyblish.api.ExtractorOrder + 0.01
    label = "Extract Review Data Mov"

    families = ["review"]
    hosts = ["nuke"]

    def process(self, instance):
        families = instance.data["families"]
        self.log.info("Creating staging dir...")
        if "representations" in instance.data:
            staging_dir = instance.data[
                "representations"][0]["stagingDir"].replace("\\", "/")
            instance.data["stagingDir"] = staging_dir
            instance.data["representations"][0]["tags"] = []
        else:
            instance.data["representations"] = []
            # get output path
            render_path = instance.data['path']
            staging_dir = os.path.normpath(os.path.dirname(render_path))
            instance.data["stagingDir"] = staging_dir

        self.log.info(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

        # generate data
        with anlib.maintained_selection():
            exporter = pnlib.ExporterReviewMov(
                self, instance)

            if "render.farm" in families:
                instance.data["families"].remove("review")
                instance.data["families"].remove("ftrack")
                data = exporter.generate_mov(farm=True)
            else:
                data = exporter.generate_mov()

        # assign to representations
        instance.data["representations"] += data["representations"]

        self.log.debug(
            "_ representations: {}".format(instance.data["representations"]))
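
For illustration, one of the representation entries appended above might look like the following sketch; the keys files, stagingDir and tags appear elsewhere in this commit, while the concrete values (and the name key) are hypothetical:

    representation = {
        "name": "mov",
        "files": "shot010_review.mov",
        "stagingDir": "C:/temp/pyblish_staging",
        "tags": ["review"],
    }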
@ -39,6 +39,25 @@ def _streams(source):
    return json.loads(out)['streams']


def get_fps(str_value):
    if str_value == "0/0":
        print("Source has \"r_frame_rate\" value set to \"0/0\".")
        return "Unknown"

    items = str_value.split("/")
    if len(items) == 1:
        fps = float(items[0])

    elif len(items) == 2:
        fps = float(items[0]) / float(items[1])

    else:
        # unexpected value such as "a/b/c"; bail out instead of
        # hitting an unbound `fps` below
        return "Unknown"

    # Check if fps is an integer or a float number
    if int(fps) == fps:
        fps = int(fps)

    return str(fps)

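
A quick sketch of how this parses typical ffprobe "r_frame_rate" rational strings (sample inputs hypothetical):

    get_fps("25/1")        # -> "25"
    get_fps("24000/1001")  # -> "23.976..." (the float kept as a string)
    get_fps("0/0")         # -> "Unknown"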
class ModifiedBurnins(ffmpeg_burnins.Burnins):
    '''
    This is a modification of the OTIO FFmpeg Burnin adapter.
@ -95,6 +114,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        streams = _streams(source)

        super().__init__(source, streams)

        if options_init:
            self.options_init.update(options_init)

@ -139,12 +159,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        options['frame_offset'] = start_frame

        expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
        _text = str(int(self.end_frame + options['frame_offset']))
        if text and isinstance(text, str):
            text = r"{}".format(text)
            expr = text.replace("{current_frame}", expr)
            text = text.replace("{current_frame}", _text)

        options['expression'] = expr
        text = str(int(self.end_frame + options['frame_offset']))
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

    def add_timecode(self, align, options=None, start_frame=None):
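
The %%{eif\:...} string above is an ffmpeg drawtext expression template; a minimal sketch of how it expands (the offset value is hypothetical):

    frame_offset = 1001
    expr = r'%%{eif\:n+%d\:d}' % frame_offset
    # expr == r'%{eif\:n+1001\:d}', which ffmpeg's drawtext filter
    # evaluates per frame as the integer n + 1001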
@ -328,6 +349,17 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)

    frame_start = data.get("frame_start")
    frame_start_tc = data.get('frame_start_tc', frame_start)

    stream = burnin._streams[0]
    if "resolution_width" not in data:
        data["resolution_width"] = stream.get("width", "Unknown")

    if "resolution_height" not in data:
        data["resolution_height"] = stream.get("height", "Unknown")

    if "fps" not in data:
        data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))

    for align_text, preset in presets.get('burnins', {}).items():
        align = None
        if align_text == 'TOP_LEFT':
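
For illustration, a minimal data payload for burnins_from_data could look like this sketch (keys taken from the hunk above, values hypothetical):

    data = {
        "frame_start": 1001,
        "frame_start_tc": 1001,
        # "resolution_width", "resolution_height" and "fps" are optional:
        # when missing they are filled from the first ffprobe stream
    }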
@ -382,12 +414,14 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)

    elif bi_func == 'timecode':
        burnin.add_timecode(align, start_frame=frame_start_tc)

    elif bi_func == 'text':
        if not preset.get('text'):
            log.error('Text is not set for text function burnin!')
            return
        text = preset['text'].format(**data)
        burnin.add_text(text, align)

    elif bi_func == "datetime":
        date_format = preset["format"]
        burnin.add_datetime(date_format, align)

@ -414,4 +448,4 @@ if __name__ == '__main__':
        data['codec'],
        data['output'],
        data['burnin_data']
    )
)

@ -299,14 +299,15 @@ class ComponentItem(QtWidgets.QFrame):
class LightingButton(QtWidgets.QPushButton):
    lightingbtnstyle = """
    QPushButton {
+        font: %(font_size_pt)spt;
        text-align: center;
        color: #777777;
        background-color: transparent;
        border-width: 1px;
        border-color: #777777;
        border-style: solid;
-        padding-top: 2px;
-        padding-bottom: 2px;
+        padding-top: 0px;
+        padding-bottom: 0px;
        padding-left: 3px;
        padding-right: 3px;
        border-radius: 3px;
@ -343,18 +344,13 @@ class LightingButton(QtWidgets.QPushButton):
    }
    """

-    def __init__(self, text, *args, **kwargs):
-        super().__init__(text, *args, **kwargs)
-        self.setStyleSheet(self.lightingbtnstyle)
+    def __init__(self, text, font_size_pt=8, *args, **kwargs):
+        super(LightingButton, self).__init__(text, *args, **kwargs)
+        self.setStyleSheet(self.lightingbtnstyle % {
+            "font_size_pt": font_size_pt
+        })
        self.setCheckable(True)

-        preview_font_metrics = self.fontMetrics().boundingRect(text)
-        width = preview_font_metrics.width() + 16
-        height = preview_font_metrics.height() + 5
-        self.setMaximumWidth(width)
-        self.setMaximumHeight(height)


class PngFactory:
    png_names = {
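
A usage sketch for the reworked constructor (assumes a running QApplication; the label text and point size are hypothetical):

    button = LightingButton("Preview", font_size_pt=10)
    button.setChecked(True)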
34
res/ftrack/action_icons/Delivery.svg
Normal file
@ -0,0 +1,34 @@
<?xml version="1.0" ?>
<svg id="Icons" version="1.1" viewBox="0 0 512 512" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g>
  <linearGradient gradientUnits="userSpaceOnUse" id="SVGID_1_" x1="-0.0000027" x2="512" y1="256" y2="256">
    <stop offset="0" style="stop-color:#00b300"/>
    <stop offset="1" style="stop-color:#006600"/>
  </linearGradient>
  <circle cx="256" cy="256" fill="url(#SVGID_1_)" r="256"/>
  <linearGradient gradientUnits="userSpaceOnUse" id="SVGID_2_" x1="42.6666641" x2="469.3333435" y1="256.0005188" y2="256.0005188">
    <stop offset="0" style="stop-color:#006600"/>
    <stop offset="1" style="stop-color:#00b300"/>
  </linearGradient>
  <path d="M256,469.3338623c-117.6314697,0-213.3333435-95.7023926-213.3333435-213.3333435 c0-117.6314545,95.7018661-213.333313,213.3333435-213.333313c117.6357422,0,213.3333435,95.7018661,213.3333435,213.333313 C469.3333435,373.6314697,373.6357422,469.3338623,256,469.3338623z" fill="url(#SVGID_2_)"/>
</g>
<g transform="
  translate(120, 120)
  scale(9)
  ">

  <style type="text/css">
    .st0{fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:10;}
    .st1{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;}
    .st2{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;}
    .st3{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-miterlimit:10;}
    .st4{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:3;}
  </style>
  <polyline class="st1" points="2,8 19,8 19,23 13,23 "/>
  <circle class="st1" cx="24" cy="23" r="2"/>
  <circle class="st1" cx="8" cy="23" r="2"/>
  <polyline class="st1" points="19,23 19,12 25,12 29,17 29,23 26,23 "/>
  <line class="st1" x1="4" x2="13" y1="12" y2="12"/>
  <line class="st1" x1="2" x2="11" y1="16" y2="16"/>
</g>
</svg>