Merge remote-tracking branch 'origin/develop' into feature/PYPE-570-maya-renderlayer-creator

Ondrej Samohel 2020-01-21 10:16:40 +01:00
commit 4b04a19a44
83 changed files with 8047 additions and 642 deletions

pype/blender/__init__.py (new file)

@@ -0,0 +1,34 @@
import logging
from pathlib import Path
import os
import bpy
from avalon import api as avalon
from pyblish import api as pyblish
from .plugin import AssetLoader
logger = logging.getLogger("pype.blender")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")
def install():
"""Install Blender configuration for Avalon."""
pyblish.register_plugin_path(str(PUBLISH_PATH))
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
def uninstall():
"""Uninstall Blender configuration for Avalon."""
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
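
Editorial note: a minimal usage sketch (not part of the commit), assuming avalon-core's `api.install` accepts a host module exposing `install()`/`uninstall()` like this one:

from avalon import api
import pype.blender

# avalon calls pype.blender.install() internally, which registers the
# publish/load/create plugin paths defined above.
api.install(pype.blender)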

pype/blender/action.py (new file)

@@ -0,0 +1,47 @@
import bpy
import pyblish.api
from ..action import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doens't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]
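
Editorial note: `SelectInvalidAction` follows the standard pyblish action pattern; a hypothetical validator would expose it via `actions` and implement `get_invalid`:

import pyblish.api
from pype.blender.action import SelectInvalidAction

class ValidateLowercaseNames(pyblish.api.InstancePlugin):
    """Hypothetical validator showing how the action is hooked up."""
    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    families = ["model"]
    label = "Validate Lowercase Names"
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        # Objects the action will select when this plug-in fails.
        return [obj for obj in instance if obj.name != obj.name.lower()]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError(
                "Found objects with uppercase names: {}".format(invalid)
            )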

pype/blender/plugin.py (new file)

@@ -0,0 +1,135 @@
"""Shared functionality for pipeline plugins for Blender."""
from pathlib import Path
from typing import Dict, List, Optional
import bpy
from avalon import api
VALID_EXTENSIONS = [".blend"]
def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
"""Return a consistent name for a model asset."""
name = f"{asset}_{subset}"
if namespace:
name = f"{namespace}:{name}"
return name
class AssetLoader(api.Loader):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
into another Blender scene.
The `update` method should be implemented by a sub-class, because
it's different for different types (e.g. model, rig, animation,
etc.).
"""
@staticmethod
def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
"""Get the 'instance empty' that holds the collection instance."""
for node in nodes:
if not isinstance(node, bpy.types.Object):
continue
if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
and node.instance_collection and node.name == instance_name):
return node
return None
@staticmethod
def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
"""Get the 'instance collection' (container) for this asset."""
for node in nodes:
if not isinstance(node, bpy.types.Collection):
continue
if node.name == instance_name:
return node
return None
@staticmethod
def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
"""Find the library file from the container.
It traverses the objects of this collection, checks that there is
exactly one library from which they come and returns that library.
Warning:
No nested collections are supported at the moment!
"""
assert not container.children, "Nested collections are not supported."
assert container.objects, "The collection doesn't contain any objects."
libraries = set()
for obj in container.objects:
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
assert len(libraries) == 1, f"'{container.name}' contains objects from more than one library."
return list(libraries)[0]
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
context: Full parenthood of representation to load
name: Use pre-defined name
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO (jasper): make it possible to add the asset several times by
# just re-using the collection
assert Path(self.fname).exists(), f"{self.fname} doesn't exist."
self.process_asset(
context=context,
name=name,
namespace=namespace,
options=options,
)
# Only containerise if anything was loaded by the Loader.
nodes = self[:]
if not nodes:
return None
# Only containerise if it's not already a collection from a .blend file.
representation = context["representation"]["name"]
if representation != "blend":
from avalon.blender.pipeline import containerise
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__,
)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
instance_name = model_name(asset, subset, namespace)
return self._get_instance_collection(instance_name, nodes)
def update(self, container: Dict, representation: Dict):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
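
Editorial note: for illustration, `model_name` joins asset and subset and prefixes the optional namespace:

model_name("chair", "modelMain")             # -> "chair_modelMain"
model_name("chair", "modelMain", "shot010")  # -> "shot010:chair_modelMain"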


@@ -0,0 +1,538 @@
import os
import copy
import shutil
import collections
import string
import clique
from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pypeapp import Anatomy
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import CustAttrIdKey
class Delivery(BaseAction):
'''Deliver selected asset version components to a client location.'''
#: Action identifier.
identifier = "delivery.action"
#: Action label.
label = "Delivery"
#: Action description.
description = "Deliver data to client"
#: roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project manager"]
icon = '{}/ftrack/action_icons/Delivery.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
db_con = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def interface(self, session, entities, event):
if event["data"].get("values", {}):
return
title = "Delivery data to Client"
items = []
item_splitter = {"type": "label", "value": "---"}
# Prepare component names for processing
components = None
project = None
for entity in entities:
if project is None:
project_id = None
for ent_info in entity["link"]:
if ent_info["type"].lower() == "project":
project_id = ent_info["id"]
break
if project_id is None:
project = entity["asset"]["parent"]["project"]
else:
project = session.query((
"select id, full_name from Project where id is \"{}\""
).format(project_id)).one()
_components = set(
[component["name"] for component in entity["components"]]
)
if components is None:
components = _components
continue
components = components.intersection(_components)
if not components:
break
project_name = project["full_name"]
items.append({
"type": "hidden",
"name": "__project_name__",
"value": project_name
})
# Prepare anatomy data
anatomy = Anatomy(project_name)
new_anatomies = []
first = None
for key in (anatomy.templates.get("delivery") or {}):
new_anatomies.append({
"label": key,
"value": key
})
if first is None:
first = key
skipped = False
# Add message if there are any common components
if not components or not new_anatomies:
skipped = True
items.append({
"type": "label",
"value": "<h1>Something went wrong:</h1>"
})
items.append({
"type": "hidden",
"name": "__skipped__",
"value": skipped
})
if not components:
if len(entities) == 1:
items.append({
"type": "label",
"value": (
"- Selected entity doesn't have components to deliver."
)
})
else:
items.append({
"type": "label",
"value": (
"- Selected entities don't have common components."
)
})
# Add message if delivery anatomies are not set
if not new_anatomies:
items.append({
"type": "label",
"value": (
"- `\"delivery\"` anatomy key is not set in config."
)
})
# Skip if there are any data shortcomings
if skipped:
return {
"items": items,
"title": title
}
items.append({
"value": "<h1>Choose Components to deliver</h1>",
"type": "label"
})
for component in components:
items.append({
"type": "boolean",
"value": False,
"label": component,
"name": component
})
items.append(item_splitter)
items.append({
"value": "<h2>Location for delivery</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<i>NOTE: It is possible to replace `root` key in anatomy.</i>"
)
})
items.append({
"type": "text",
"name": "__location_path__",
"empty_text": "Type location path here...(Optional)"
})
items.append(item_splitter)
items.append({
"value": "<h2>Anatomy of delivery files</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<p><i>NOTE: These can be set in Anatomy.yaml"
" within `delivery` key.</i></p>"
)
})
items.append({
"type": "enumerator",
"name": "__new_anatomies__",
"data": new_anatomies,
"value": first
})
return {
"items": items,
"title": title
}
def launch(self, session, entities, event):
if "values" not in event["data"]:
return
self.report_items = collections.defaultdict(list)
values = event["data"]["values"]
skipped = values.pop("__skipped__")
if skipped:
return None
component_names = []
location_path = values.pop("__location_path__")
anatomy_name = values.pop("__new_anatomies__")
project_name = values.pop("__project_name__")
for key, value in values.items():
if value is True:
component_names.append(key)
if not component_names:
return {
"success": True,
"message": "Not selected components to deliver."
}
location_path = location_path.strip()
if location_path:
location_path = os.path.normpath(location_path)
if not os.path.exists(location_path):
return {
"success": False,
"message": (
"Entered location path does not exists. \"{}\""
).format(location_path)
}
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
repres_to_deliver = []
for entity in entities:
asset = entity["asset"]
subset_name = asset["name"]
version = entity["version"]
parent = asset["parent"]
parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if parent_mongo_id:
parent_mongo_id = ObjectId(parent_mongo_id)
else:
asset_ent = self.db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
if not asset_ent:
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
msg = "Not synchronized entities to avalon"
self.report_items[msg].append(ent_path)
self.log.warning("{} <{}>".format(msg, ent_path))
continue
parent_mongo_id = asset_ent["_id"]
subset_ent = self.db_con.find_one({
"type": "subset",
"parent": parent_mongo_id,
"name": subset_name
})
version_ent = self.db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
repre_ents = self.db_con.find({
"type": "representation",
"parent": version_ent["_id"]
})
repres_by_name = {}
for repre in repre_ents:
repre_name = repre["name"]
repres_by_name[repre_name] = repre
for component in entity["components"]:
comp_name = component["name"]
if comp_name not in component_names:
continue
repre = repres_by_name.get(comp_name)
repres_to_deliver.append(repre)
if not location_path:
location_path = os.environ.get("AVALON_PROJECTS") or ""
print(location_path)
anatomy = Anatomy(project_name)
for repre in repres_to_deliver:
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data["root"] = location_path
anatomy_filled = anatomy.format(anatomy_data)
test_path = (
anatomy_filled
.get("delivery", {})
.get(anatomy_name)
)
if not test_path:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(anatomy_name)
all_anatomies = anatomy.format_all(anatomy_data)
result = None
for anatomies in all_anatomies.values():
for key, temp in anatomies.get("delivery", {}).items():
if key != anatomy_name:
continue
result = temp
break
# TODO log error! - missing keys in anatomy
if result:
missing_keys = [
key[1] for key in string.Formatter().parse(result)
if key[1] is not None
]
else:
missing_keys = ["unknown"]
keys = ", ".join(missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(str(repre["_id"]), keys)
self.report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(result)
)
)
continue
# Get source repre path
frame = repre['context'].get('frame')
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_representation(repre)
# TODO add backup solution where root of path from component
# is replaced with AVALON_PROJECTS root
if not frame:
self.process_single_file(
repre_path, anatomy, anatomy_name, anatomy_data
)
else:
self.process_sequence(
repre_path, anatomy, anatomy_name, anatomy_data
)
self.db_con.uninstall()
return self.report()
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
dir_path, file_name = os.path.split(str(repre_path))
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
msg = "Source file was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
# skip if collection doesn't have the same basename
if not col.head.startswith(file_name_items[0]):
continue
src_collection = col
break
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
print(delivery_path)
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
self.copy_file(src, dst)
def path_from_representation(self, representation):
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = os.environ.get("AVALON_PROJECTS") or ""
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(self, src_path, dst_path):
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
def report(self):
items = []
title = "Delivery report"
for msg, _items in self.report_items.items():
if not _items:
continue
if items:
items.append({"type": "label", "value": "---"})
items.append({
"type": "label",
"value": "# {}".format(msg)
})
if not isinstance(_items, (list, tuple)):
_items = [_items]
__items = []
for item in _items:
__items.append(str(item))
items.append({
"type": "label",
"value": '<p>{}</p>'.format("<br>".join(__items))
})
if not items:
return {
"success": True,
"message": "Delivery Finished"
}
return {
"items": items,
"title": title,
"success": False,
"message": "Delivery Finished"
}
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
Delivery(session, plugins_presets).register()
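
Editorial note: the action expects the project Anatomy to define templates under a `delivery` key. A hedged sketch of how one such template resolves (the template name and key values below are hypothetical):

# Anatomy.yaml (hypothetical):
#   delivery:
#     client_exr: "{root}/{asset}/{subset}_v{version:0>3}/{asset}.{frame}.{representation}"
anatomy_data = {
    "root": "/deliveries/clientX",
    "asset": "sh010",
    "subset": "renderMain",
    "version": 3,
    "frame": "@####@",  # the frame_indicator used by process_sequence
    "representation": "exr",
}
# anatomy.format(anatomy_data)["delivery"]["client_exr"] would then yield:
#   /deliveries/clientX/sh010/renderMain_v003/sh010.@####@.exr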


@@ -70,7 +70,10 @@ class SyncToAvalonLocal(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()


@@ -105,7 +105,10 @@ class SyncToAvalonServer(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()


@@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent):
ignore_entTypes = [
"socialfeed", "socialnotification", "note",
"assetversion", "job", "user", "reviewsessionobject", "timer",
"timelog", "auth_userrole"
"timelog", "auth_userrole", "appointment"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid"]
@@ -131,7 +131,9 @@ class SyncToAvalonEvent(BaseEvent):
ftrack_id = proj["data"]["ftrackId"]
self._avalon_ents_by_ftrack_id[ftrack_id] = proj
for ent in ents:
ftrack_id = ent["data"]["ftrackId"]
ftrack_id = ent["data"].get("ftrackId")
if ftrack_id is None:
continue
self._avalon_ents_by_ftrack_id[ftrack_id] = ent
return self._avalon_ents_by_ftrack_id
@@ -1427,6 +1429,93 @@ class SyncToAvalonEvent(BaseEvent):
parent_id = ent_info["parentId"]
new_tasks_by_parent[parent_id].append(ent_info)
pop_out_ents.append(ftrack_id)
continue
name = (
ent_info
.get("changes", {})
.get("name", {})
.get("new")
)
avalon_ent_by_name = self.avalon_ents_by_name.get(name)
avalon_ent_by_name_ftrack_id = None
if avalon_ent_by_name:
avalon_ent_by_name_ftrack_id = (
avalon_ent_by_name
.get("data", {})
.get("ftrackId")
)
if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None:
ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
if not ftrack_ent:
ftrack_ent = self.process_session.query(
self.entities_query_by_id.format(
self.cur_project["id"], ftrack_id
)
).one()
self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
avalon_ent_parents = (
avalon_ent_by_name.get("data", {}).get("parents")
)
if parents == avalon_ent_parents:
self.dbcon.update_one({
"_id": avalon_ent_by_name["_id"]
}, {
"$set": {
"data.ftrackId": ftrack_id,
"data.entityType": entity_type
}
})
avalon_ent_by_name["data"]["ftrackId"] = ftrack_id
avalon_ent_by_name["data"]["entityType"] = entity_type
self._avalon_ents_by_ftrack_id[ftrack_id] = (
avalon_ent_by_name
)
if self._avalon_ents_by_parent_id:
found = None
for _parent_id_, _entities_ in (
self._avalon_ents_by_parent_id.items()
):
for _idx_, entity in enumerate(_entities_):
if entity["_id"] == avalon_ent_by_name["_id"]:
found = (_parent_id_, _idx_)
break
if found:
break
if found:
_parent_id_, _idx_ = found
self._avalon_ents_by_parent_id[_parent_id_][
_idx_] = avalon_ent_by_name
if self._avalon_ents_by_id:
self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = (
avalon_ent_by_name
)
if self._avalon_ents_by_name:
self._avalon_ents_by_name[name] = avalon_ent_by_name
if self._avalon_ents:
found = None
project, entities = self._avalon_ents
for _idx_, _ent_ in enumerate(entities):
if _ent_["_id"] != avalon_ent_by_name["_id"]:
continue
found = _idx_
break
if found is not None:
entities[found] = avalon_ent_by_name
self._avalon_ents = project, entities
pop_out_ents.append(ftrack_id)
continue
configuration_id = entity_type_conf_ids.get(entity_type)
if not configuration_id:
@@ -1438,9 +1527,11 @@
if attr["entity_type"] != ent_info["entityType"]:
continue
if ent_info["entityType"] != "show":
if attr["object_type_id"] != ent_info["objectTypeId"]:
continue
if (
ent_info["entityType"] == "task" and
attr["object_type_id"] != ent_info["objectTypeId"]
):
continue
configuration_id = attr["id"]
entity_type_conf_ids[entity_type] = configuration_id
@@ -1712,7 +1803,8 @@
if ca_ent_type == "show":
cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
cust_attrs_by_obj_id[obj_id][key] = cust_attr


@@ -265,6 +265,37 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(


@@ -26,6 +26,8 @@ class SocketThread(threading.Thread):
self.mongo_error = False
self._temp_data = {}
def stop(self):
self._is_running = False
@@ -81,8 +83,9 @@
try:
if not self._is_running:
break
data = None
try:
data = connection.recv(16)
data = self.get_data_from_con(connection)
time_con = time.time()
except socket.timeout:
@@ -99,10 +102,7 @@
self._is_running = False
break
if data:
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
self._handle_data(connection, data)
except Exception as exc:
self.log.error(
@@ -121,3 +121,14 @@
for line in lines:
os.write(1, line)
self.finished = True
def get_data_from_con(self, connection):
return connection.recv(16)
def _handle_data(self, connection, data):
if not data:
return
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
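
Editorial note: the refactor above turns `get_data_from_con` and `_handle_data` into overridable hooks, so subclasses can change how data is read and interpreted without touching the connection loop. A hypothetical subclass:

class StatusSocketThread(SocketThread):
    """Hypothetical subclass demonstrating the new hook methods."""

    def get_data_from_con(self, connection):
        # Read a larger chunk than the default 16 bytes.
        return connection.recv(1024)

    def _handle_data(self, connection, data):
        if data == b"status":
            connection.sendall(b"running")
            return
        # Fall back to the default behaviour (MongoError flag + echo).
        super(StatusSocketThread, self)._handle_data(connection, data)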


@@ -0,0 +1,51 @@
import sys
import signal
import socket
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pypeapp import Logger
log = Logger().get_logger(__name__)
def main(args):
port = int(args[-1])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug("Storer connected to {} port {}".format(*server_address))
sock.connect(server_address)
sock.sendall(b"CreatedUser")
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
)
server = FtrackServer("action")
log.debug("Launched Ftrack Event storer")
server.run_server(session=session)
finally:
log.debug("Closing socket")
sock.close()
return 1
if __name__ == "__main__":
# Register interrupt signal
def signal_handler(sig, frame):
log.info("Process was forced to stop. Process ended.")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
sys.exit(main(sys.argv))


@@ -314,6 +314,9 @@ class SyncEntitiesFactory:
self.log.warning(msg)
return {"success": False, "message": msg}
self.log.debug((
"*** Synchronization initialization started <{}>."
).format(project_full_name))
# Check if `avalon_mongo_id` custom attribute exist or is accessible
if CustAttrIdKey not in ft_project["custom_attributes"]:
items = []
@@ -699,7 +702,7 @@
if ca_ent_type == "show":
avalon_attrs[ca_ent_type][key] = cust_attr["default"]
avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"]
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
avalon_attrs[obj_id][key] = cust_attr["default"]
avalon_attrs_ca_id[obj_id][key] = cust_attr["id"]
@@ -708,7 +711,7 @@
if ca_ent_type == "show":
attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"]
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
attrs_per_entity_type[obj_id][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"]


@@ -1,26 +1,27 @@
import os
import json
import threading
import time
from Qt import QtCore, QtGui, QtWidgets
import datetime
import threading
from Qt import QtCore, QtWidgets
import ftrack_api
from pypeapp import style
from pype.ftrack import FtrackServer, check_ftrack_url, credentials
from ..ftrack_server.lib import check_ftrack_url
from ..ftrack_server import socket_thread
from ..lib import credentials
from . import login_dialog
from pype import api as pype
from pypeapp import Logger
log = pype.Logger().get_logger("FtrackModule", "ftrack")
log = Logger().get_logger("FtrackModule", "ftrack")
class FtrackModule:
def __init__(self, main_parent=None, parent=None):
self.parent = parent
self.widget_login = login_dialog.Login_Dialog_ui(self)
self.action_server = FtrackServer('action')
self.thread_action_server = None
self.thread_socket_server = None
self.thread_timer = None
self.bool_logged = False
@@ -75,14 +76,6 @@ class FtrackModule:
# Actions part
def start_action_server(self):
self.bool_action_thread_running = True
self.set_menu_visibility()
if (
self.thread_action_server is not None and
self.bool_action_thread_running is False
):
self.stop_action_server()
if self.thread_action_server is None:
self.thread_action_server = threading.Thread(
target=self.set_action_server
@@ -90,35 +83,114 @@ class FtrackModule:
self.thread_action_server.start()
def set_action_server(self):
first_check = True
while self.bool_action_thread_running is True:
if not check_ftrack_url(os.environ['FTRACK_SERVER']):
if first_check:
log.warning(
"Could not connect to Ftrack server"
)
first_check = False
if self.bool_action_server_running:
return
self.bool_action_server_running = True
self.bool_action_thread_running = False
ftrack_url = os.environ['FTRACK_SERVER']
parent_file_path = os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
min_fail_seconds = 5
max_fail_count = 3
wait_time_after_max_fail = 10
# Threads data
thread_name = "ActionServerThread"
thread_port = 10021
subprocess_path = (
"{}/ftrack_server/sub_user_server.py".format(parent_file_path)
)
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
last_failed = datetime.datetime.now()
failed_count = 0
ftrack_accessible = False
printed_ftrack_error = False
# Main loop
while True:
if not self.bool_action_server_running:
log.debug("Action server was pushed to stop.")
break
# Check if accessible Ftrack and Mongo url
if not ftrack_accessible:
ftrack_accessible = check_ftrack_url(ftrack_url)
# Run threads only if Ftrack is accessible
if not ftrack_accessible:
if not printed_ftrack_error:
log.warning("Can't access Ftrack {}".format(ftrack_url))
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
self.bool_action_thread_running = False
self.set_menu_visibility()
printed_ftrack_error = True
time.sleep(1)
continue
log.info(
"Connected to Ftrack server. Running actions session"
)
try:
self.bool_action_server_running = True
printed_ftrack_error = False
# Run backup thread which does not require mongo to work
if self.thread_socket_server is None:
if failed_count < max_fail_count:
self.thread_socket_server = socket_thread.SocketThread(
thread_name, thread_port, subprocess_path
)
self.thread_socket_server.start()
self.bool_action_thread_running = True
self.set_menu_visibility()
elif failed_count == max_fail_count:
log.warning((
"Action server failed {} times."
" I'll try to run again {}s later"
).format(
str(max_fail_count), str(wait_time_after_max_fail))
)
failed_count += 1
elif ((
datetime.datetime.now() - last_failed
).seconds > wait_time_after_max_fail):
failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not self.thread_socket_server.isAlive():
self.thread_socket_server.join()
self.thread_socket_server = None
ftrack_accessible = False
self.bool_action_thread_running = False
self.set_menu_visibility()
self.action_server.run_server()
if self.bool_action_thread_running:
log.debug("Ftrack action server has stopped")
except Exception:
log.warning(
"Ftrack Action server crashed. Trying to connect again",
exc_info=True
)
self.bool_action_server_running = False
self.set_menu_visibility()
first_check = True
_last_failed = datetime.datetime.now()
delta_time = (_last_failed - last_failed).seconds
if delta_time < min_fail_seconds:
failed_count += 1
else:
failed_count = 0
last_failed = _last_failed
time.sleep(1)
self.bool_action_thread_running = False
self.bool_action_server_running = False
self.set_menu_visibility()
def reset_action_server(self):
self.stop_action_server()
@@ -126,16 +198,18 @@ class FtrackModule:
def stop_action_server(self):
try:
self.bool_action_thread_running = False
self.action_server.stop_session()
self.bool_action_server_running = False
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
if self.thread_action_server is not None:
self.thread_action_server.join()
self.thread_action_server = None
log.info("Ftrack action server was forced to stop")
self.bool_action_server_running = False
self.set_menu_visibility()
except Exception:
log.warning(
"Error has happened during Killing action server",
@@ -201,9 +275,9 @@ class FtrackModule:
self.stop_timer_thread()
return
self.aRunActionS.setVisible(not self.bool_action_thread_running)
self.aRunActionS.setVisible(not self.bool_action_server_running)
self.aResetActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_server_running)
if self.bool_timer_event is False:
self.start_timer_thread()
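
Editorial note: the restart policy above boils down to: failures less than `min_fail_seconds` apart increment `failed_count`; once it reaches `max_fail_count`, restarts pause until `wait_time_after_max_fail` seconds have passed since the last failure. A condensed, hypothetical sketch of just that bookkeeping:

import datetime

class RestartPolicy:
    """Hypothetical condensation of the failure bookkeeping above."""

    def __init__(self, min_fail_seconds=5, max_fail_count=3,
                 wait_time_after_max_fail=10):
        self.min_fail_seconds = min_fail_seconds
        self.max_fail_count = max_fail_count
        self.wait_time_after_max_fail = wait_time_after_max_fail
        self.failed_count = 0
        self.last_failed = datetime.datetime.now()

    def register_failure(self):
        now = datetime.datetime.now()
        if (now - self.last_failed).seconds < self.min_fail_seconds:
            self.failed_count += 1  # crashed again too quickly
        else:
            self.failed_count = 0   # ran long enough, reset the counter
        self.last_failed = now

    def may_restart(self):
        if self.failed_count < self.max_fail_count:
            return True
        # After repeated quick failures, wait before trying again.
        since_last = datetime.datetime.now() - self.last_failed
        return since_last.seconds > self.wait_time_after_max_fail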


@@ -18,13 +18,16 @@ def _subprocess(*args, **kwargs):
"""Convenience method for getting output errors for subprocess."""
# make sure environment contains only strings
filtered_env = {k: str(v) for k, v in os.environ.items()}
if not kwargs.get("env"):
filtered_env = {k: str(v) for k, v in os.environ.items()}
else:
filtered_env = {k: str(v) for k, v in kwargs.get("env").items()}
# set overrides
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
kwargs['env'] = kwargs.get('env',filtered_env)
kwargs['env'] = filtered_env
proc = subprocess.Popen(*args, **kwargs)
@@ -193,9 +196,13 @@ def any_outdated():
if representation in checked:
continue
representation_doc = io.find_one({"_id": io.ObjectId(representation),
"type": "representation"},
projection={"parent": True})
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
@@ -305,27 +312,38 @@ def switch_item(container,
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
asset = io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
subset = io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[('name', -1)]
)
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
representation = io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
@@ -363,7 +381,10 @@ def get_asset(asset_name=None):
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
asset_document = io.find_one({
"name": asset_name,
"type": "asset"
})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
@@ -535,8 +556,7 @@ def get_subsets(asset_name,
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
asset_io = io.find_one({"type": "asset", "name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
@@ -560,14 +580,20 @@ def get_subsets(asset_name,
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version_sel = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
version_sel = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": int(version)
})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}


@@ -707,9 +707,11 @@ class WorkfileSettings(object):
frame_start = int(data["frameStart"]) - handle_start
frame_end = int(data["frameEnd"]) + handle_end
self._root_node["lock_range"].setValue(False)
self._root_node["fps"].setValue(fps)
self._root_node["first_frame"].setValue(frame_start)
self._root_node["last_frame"].setValue(frame_end)
self._root_node["lock_range"].setValue(True)
# setting active viewers
try:
@@ -1197,13 +1199,13 @@ class BuildWorkfile(WorkfileSettings):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
class Exporter_review_lut:
class ExporterReview:
"""
Generator object for review lut from Nuke
Base class object for generating review data from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
_temp_nodes = []
@@ -1213,94 +1215,15 @@ class Exporter_review_lut:
def __init__(self,
klass,
instance,
name=None,
ext=None,
cube_size=None,
lut_size=None,
lut_style=None):
instance
):
self.log = klass.log
self.instance = instance
self.name = name or "baked_lut"
self.ext = ext or "cube"
self.cube_size = cube_size or 32
self.lut_size = lut_size or 1024
self.lut_style = lut_style or "linear"
self.stagingDir = self.instance.data["stagingDir"]
self.path_in = self.instance.data.get("path", None)
self.staging_dir = self.instance.data["stagingDir"]
self.collection = self.instance.data.get("collection", None)
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(self.stagingDir, self.file).replace("\\", "/")
def generate_lut(self):
# ---------- start nodes creation
# CMSTestPattern
cms_node = nuke.createNode("CMSTestPattern")
cms_node["cube_size"].setValue(self.cube_size)
# connect
self._temp_nodes.append(cms_node)
self.previous_node = cms_node
self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
# Node View Process
ipn = self.get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
gen_lut_node["file"].setValue(self.path)
gen_lut_node["file_type"].setValue(".{}".format(self.ext))
gen_lut_node["lut1d"].setValue(self.lut_size)
gen_lut_node["style1d"].setValue(self.lut_style)
# connect
gen_lut_node.setInput(0, self.previous_node)
self._temp_nodes.append(gen_lut_node)
self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# Export lut file
nuke.execute(
gen_lut_node.name(),
int(self.first_frame),
int(self.first_frame))
self.log.info("Exported...")
# ---------- generate representation data
self.get_representation_data()
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
for node in self._temp_nodes:
nuke.delete(node)
self.log.info("Deleted nodes...")
return self.data
def get_file_info(self):
if self.collection:
self.log.debug("Collection: `{}`".format(self.collection))
@@ -1312,8 +1235,10 @@ class Exporter_review_lut:
# get first and last frame
self.first_frame = min(self.collection.indexes)
self.last_frame = max(self.collection.indexes)
if "slate" in self.instance.data["families"]:
self.first_frame += 1
else:
self.fname = os.path.basename(self.instance.data.get("path", None))
self.fname = os.path.basename(self.path_in)
self.fhead = os.path.splitext(self.fname)[0] + "."
self.first_frame = self.instance.data.get("frameStart", None)
self.last_frame = self.instance.data.get("frameEnd", None)
@@ -1321,17 +1246,26 @@ class Exporter_review_lut:
if "#" in self.fhead:
self.fhead = self.fhead.replace("#", "")[:-1]
def get_representation_data(self):
def get_representation_data(self, tags=None, range=False):
add_tags = []
if tags:
add_tags = tags
repre = {
'name': self.name,
'ext': self.ext,
'files': self.file,
"stagingDir": self.stagingDir,
"stagingDir": self.staging_dir,
"anatomy_template": "publish",
"tags": [self.name.replace("_", "-")]
"tags": [self.name.replace("_", "-")] + add_tags
}
if range:
repre.update({
"frameStart": self.first_frame,
"frameEnd": self.last_frame,
})
self.data["representations"].append(repre)
def get_view_process_node(self):
@@ -1366,6 +1300,252 @@ class Exporter_review_lut:
return ipn
def clean_nodes(self):
for node in self._temp_nodes:
nuke.delete(node)
self.log.info("Deleted nodes...")
class ExporterReviewLut(ExporterReview):
"""
Generator object for review lut from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
def __init__(self,
klass,
instance,
name=None,
ext=None,
cube_size=None,
lut_size=None,
lut_style=None):
# initialize parent class
ExporterReview.__init__(self, klass, instance)
# handle case where viewer lut is set to raw
if hasattr(klass, "viewer_lut_raw"):
self.viewer_lut_raw = klass.viewer_lut_raw
else:
self.viewer_lut_raw = False
self.name = name or "baked_lut"
self.ext = ext or "cube"
self.cube_size = cube_size or 32
self.lut_size = lut_size or 1024
self.lut_style = lut_style or "linear"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def generate_lut(self):
# ---------- start nodes creation
# CMSTestPattern
cms_node = nuke.createNode("CMSTestPattern")
cms_node["cube_size"].setValue(self.cube_size)
# connect
self._temp_nodes.append(cms_node)
self.previous_node = cms_node
self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
# Node View Process
ipn = self.get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
gen_lut_node["file"].setValue(self.path)
gen_lut_node["file_type"].setValue(".{}".format(self.ext))
gen_lut_node["lut1d"].setValue(self.lut_size)
gen_lut_node["style1d"].setValue(self.lut_style)
# connect
gen_lut_node.setInput(0, self.previous_node)
self._temp_nodes.append(gen_lut_node)
self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# Export lut file
nuke.execute(
gen_lut_node.name(),
int(self.first_frame),
int(self.first_frame))
self.log.info("Exported...")
# ---------- generate representation data
self.get_representation_data()
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
self.clean_nodes()
return self.data
class ExporterReviewMov(ExporterReview):
"""
Class for generating review mov files
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
def __init__(self,
klass,
instance,
name=None,
ext=None,
):
# initialize parent class
ExporterReview.__init__(self, klass, instance)
# passing presets for nodes to self
if hasattr(klass, "nodes"):
self.nodes = klass.nodes
else:
self.nodes = {}
# handle case where viewer lut is set to raw
if hasattr(klass, "viewer_lut_raw"):
self.viewer_lut_raw = klass.viewer_lut_raw
else:
self.viewer_lut_raw = False
self.name = name or "baked"
self.ext = ext or "mov"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def render(self, render_node_name):
self.log.info("Rendering... ")
# Render Write node
nuke.execute(
render_node_name,
int(self.first_frame),
int(self.last_frame))
self.log.info("Rendered...")
def save_file(self):
import shutil
with anlib.maintained_selection():
self.log.info("Saving nodes as file... ")
# create nk path
path = os.path.splitext(self.path)[0] + ".nk"
# save file to the path
shutil.copyfile(self.instance.context.data["currentFile"], path)
self.log.info("Nodes exported...")
return path
def generate_mov(self, farm=False):
# ---------- start nodes creation
# Read node
r_node = nuke.createNode("Read")
r_node["file"].setValue(self.path_in)
r_node["first"].setValue(self.first_frame)
r_node["origfirst"].setValue(self.first_frame)
r_node["last"].setValue(self.last_frame)
r_node["origlast"].setValue(self.last_frame)
# connect
self._temp_nodes.append(r_node)
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes))
# View Process node
ipn = self.get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay node
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(self.path)
write_node["file_type"].setValue(self.ext)
write_node["meta_codec"].setValue("ap4h")
write_node["mov64_codec"].setValue("ap4h")
write_node["mov64_write_timecode"].setValue(1)
write_node["raw"].setValue(1)
# connect
write_node.setInput(0, self.previous_node)
self._temp_nodes.append(write_node)
self.log.debug("Write... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# ---------- render or save to nk
if farm:
nuke.scriptSave()
path_nk = self.save_file()
self.data.update({
"bakeScriptPath": path_nk,
"bakeWriteNodeName": write_node.name(),
"bakeRenderPath": self.path
})
else:
self.render(write_node.name())
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"],
range=True
)
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
self.clean_nodes()
nuke.scriptSave()
return self.data
def get_dependent_nodes(nodes):
"""Get all dependent nodes connected to the list of nodes.
@@ -1401,3 +1581,70 @@ def get_dependent_nodes(nodes):
})
return connections_in, connections_out
def find_free_space_to_paste_nodes(
nodes,
group=nuke.root(),
direction="right",
offset=300):
"""
For getting coordinates in DAG (node graph) for placing new nodes
Arguments:
nodes (list): list of nuke.Node objects
group (nuke.Node) [optional]: group node in whose context to search
direction (str) [optional]: where we want it to be placed
[left, right, top, bottom]
offset (int) [optional]: what offset it is from rest of nodes
Returns:
xpos (int): x coordinate in DAG
ypos (int): y coordinate in DAG
"""
if len(nodes) == 0:
return 0, 0
group_xpos = list()
group_ypos = list()
# get local coordinates of all nodes
nodes_xpos = [n.xpos() for n in nodes] + \
[n.xpos() + n.screenWidth() for n in nodes]
nodes_ypos = [n.ypos() for n in nodes] + \
[n.ypos() + n.screenHeight() for n in nodes]
# get complete screen size of all nodes to be placed in
nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
# get screen size (r,l,t,b) of all nodes in `group`
with group:
group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
[n.xpos() + n.screenWidth() for n in nuke.allNodes()
if n not in nodes]
group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
[n.ypos() + n.screenHeight() for n in nuke.allNodes()
if n not in nodes]
# calc output left
if direction == "left":
xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output right
if direction == "right":
xpos = max(group_xpos) + abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output top
if direction == "top":
xpos = min(group_xpos)
ypos = min(group_ypos) - abs(nodes_screen_height) - abs(offset)
return xpos, ypos
# calc output bottom
if direction == "bottom":
xpos = min(group_xpos)
ypos = max(group_ypos) + abs(offset)
return xpos, ypos
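
Editorial note: a hypothetical use of the helper inside Nuke, translating freshly pasted nodes onto the free spot it returns:

import nuke

nodes = nuke.selectedNodes()
if nodes:
    xpos, ypos = find_free_space_to_paste_nodes(
        nodes, group=nuke.root(), direction="right", offset=300
    )
    # Shift the block so its top-left corner lands on the free spot.
    min_x = min(n.xpos() for n in nodes)
    min_y = min(n.ypos() for n in nodes)
    for n in nodes:
        n.setXYpos(n.xpos() - min_x + xpos, n.ypos() - min_y + ypos)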


@@ -22,19 +22,16 @@ def has_unsaved_changes():
def save_file(filepath):
file = os.path.basename(filepath)
project = hiero.core.projects()[-1]
# close `Untitled` project
if "Untitled" not in project.name():
log.info("Saving project: `{}`".format(project.name()))
if project:
log.info("Saving project: `{}` as '{}'".format(project.name(), file))
project.saveAs(filepath)
elif not project:
else:
log.info("Creating new project...")
project = hiero.core.newProject()
project.saveAs(filepath)
else:
log.info("Dropping `Untitled` project...")
return
def open_file(filepath):


@@ -0,0 +1,32 @@
"""Create a model asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
class CreateModel(Creator):
"""Polygonal static geometry"""
name = "modelMain"
label = "Model"
family = "model"
icon = "cube"
def process(self):
import pype.blender
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.model_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.objects.link(obj)
return collection
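
Editorial note: creators like this are normally driven from avalon's Creator tool; assuming avalon-core's `api.create(name, asset, family, options=None)` signature, a manual invocation would look roughly like:

from avalon import api

# Hypothetical manual invocation; normally the Creator GUI does this.
api.create(
    name="modelMain",
    asset="chair",
    family="model",
    options={"useSelection": True},
)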


@@ -0,0 +1,315 @@
"""Load a model asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import avalon.blender.pipeline
import bpy
import pype.blender
from avalon import api
logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
class BlendModelLoader(pype.blender.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
@staticmethod
def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
"""Find the collection(s) with name, loaded from libpath.
Note:
It is assumed that only 1 matching collection is found.
"""
for collection in bpy.data.collections:
if collection.name != name:
continue
if collection.library is None:
continue
if not collection.library.filepath:
continue
collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
if collection_lib_path == normalized_libpath:
return collection
return None
@staticmethod
def _collection_contains_object(
collection: bpy.types.Collection, object: bpy.types.Object
) -> bool:
"""Check if the collection contains the object."""
for obj in collection.objects:
if obj == object:
return True
return False
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.model_name(asset, subset)
container_name = pype.blender.plugin.model_name(
asset, subset, namespace
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
if not instance_empty.get("avalon"):
instance_empty["avalon"] = dict()
avalon_info = instance_empty["avalon"]
avalon_info.update({"container_name": container_name})
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
container = bpy.data.collections[lib_container]
container.name = container_name
instance_empty.instance_collection = container
container.make_local()
avalon.blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
nodes = list(container.objects)
nodes.append(container)
nodes.append(instance_empty)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.debug(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_libpath = (
self._get_library_from_container(collection).filepath
)
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
# Let Blender's garbage collection take care of removing the library
# itself after removing the objects.
objects_to_remove = set()
collection_objects = list()
collection_objects[:] = collection.objects
for obj in collection_objects:
# Unlink every object
collection.objects.unlink(obj)
remove_obj = True
for coll in [
coll for coll in bpy.data.collections
if coll != collection
]:
if (
coll.objects and
self._collection_contains_object(coll, obj)
):
remove_obj = False
if remove_obj:
objects_to_remove.add(obj)
for obj in objects_to_remove:
# Only delete objects that are not used elsewhere
bpy.data.objects.remove(obj)
instance_empties = [
obj for obj in collection.users_dupli_group
if obj.name in collection.name
]
if instance_empties:
instance_empty = instance_empties[0]
container_name = instance_empty["avalon"]["container_name"]
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [container_name]
new_collection = self._get_lib_collection(container_name, libpath)
if new_collection is None:
raise ValueError(
"A matching collection '{container_name}' "
"should have been found in: {libpath}"
)
for obj in new_collection.objects:
collection.objects.link(obj)
bpy.data.collections.remove(new_collection)
# Update the representation on the collection
avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
avalon_prop["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
instance_parents = list(collection.users_dupli_group)
instance_objects = list(collection.objects)
for obj in instance_objects + instance_parents:
bpy.data.objects.remove(obj)
bpy.data.collections.remove(collection)
return True
class CacheModelLoader(pype.blender.AssetLoader):
"""Load cache models.
Stores the imported asset in a collection named after the asset.
Note:
At least for now it only supports Alembic files.
"""
families = ["model"]
representations = ["abc"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
raise NotImplementedError("Loading of Alembic files is not yet implemented.")
# TODO (jasper): implement Alembic import.
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
# TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
lib_container = container_name = (
pype.blender.plugin.model_name(asset, subset, namespace)
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (data_from, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
collection = bpy.data.collections[lib_container]
collection.name = container_name
instance_empty.instance_collection = collection
nodes = list(collection.objects)
nodes.append(collection)
nodes.append(instance_empty)
self[:] = nodes
return nodes

View file

@ -0,0 +1,16 @@
import bpy
import pyblish.api
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ['blender']
def process(self, context):
"""Inject the current working file"""
current_file = bpy.data.filepath
context.data['currentFile'] = current_file

View file

@ -0,0 +1,53 @@
import typing
from typing import Generator
import bpy
import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
class CollectModel(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Model"
order = pyblish.api.CollectorOrder
@staticmethod
def get_model_collections() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if (avalon_prop.get('family') == 'model'
and not avalon_prop.get('representation')):
yield collection
def process(self, context):
"""Collect the models from the current Blender scene."""
collections = self.get_model_collections()
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
members = list(collection.objects)
members.append(collection)
instance[:] = members
self.log.debug(instance.data)

View file

@ -0,0 +1,47 @@
import os
import avalon.blender.workio
import pype.api
class ExtractModel(pype.api.Extractor):
"""Extract as model."""
label = "Model"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s", instance.name, representation)

View file

@ -0,0 +1,49 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh Has UV's"
actions = [pype.blender.action.SelectInvalidAction]
optional = True
@staticmethod
def has_uvs(obj: bpy.types.Object) -> bool:
"""Check if an object has uv's."""
if not obj.data.uv_layers:
return False
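# A UV layer can exist yet carry unset coordinates; a loop UV of
# (0.0, 0.0) is falsy, so fully zeroed loops are treated as missing.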
for uv_layer in obj.data.uv_layers:
for polygon in obj.data.polygons:
for loop_index in polygon.loop_indices:
if not uv_layer.data[loop_index].uv:
return False
return True
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")

View file

@ -0,0 +1,35 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
label = "Mesh No Negative Scale"
actions = [pype.blender.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
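# obj.scale holds the per-axis scale factors; any negative component
# mirrors the mesh and can flip its normals downstream.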
if any(v < 0 for v in obj.scale):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Meshes found in instance with negative scale: {invalid}"
)

View file

@ -188,14 +188,18 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
# Adding Custom Attributes
for attr, val in assetversion_cust_attrs.items():
if attr in assetversion_entity["custom_attributes"]:
assetversion_entity["custom_attributes"][attr] = val
continue
try:
assetversion_entity["custom_attributes"][attr] = val
session.commit()
continue
except Exception:
session.rollback()
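# Roll back so one unsupported attribute does not leave the ftrack
# session in a failed state for the remaining attributes.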
self.log.warning((
"Custom Attrubute \"{0}\""
" is not available for AssetVersion."
" Can't set it's value to: \"{1}\""
).format(attr, str(val)))
" is not available for AssetVersion <{1}>."
" Can't set it's value to: \"{2}\""
).format(attr, assetversion_entity["id"], str(val)))
# Have to commit the version and asset, because location can't
# determine the final location without.

View file

@ -12,7 +12,6 @@ import os
import re
import copy
import json
from pprint import pformat
import pyblish.api
from avalon import api
@ -91,13 +90,21 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "RenderedFrames"
def process(self, context):
pixel_aspect = 1
resolution_width = 1920
resolution_height = 1080
lut_path = None
slate_frame = None
families_data = None
subset = None
version = None
frame_start = 0
frame_end = 0
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
self.log.info("Collecting paths: {}".format(paths))
@ -123,6 +130,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
cwd = os.path.dirname(path)
root_override = data.get("root")
frame_start = int(data.get("frameStart"))
frame_end = int(data.get("frameEnd"))
subset = data.get("subset")
if root_override:
if os.path.isabs(root_override):
root = root_override
@ -148,7 +159,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
if instance:
instance_family = instance.get("family")
pixel_aspect = instance.get("pixelAspect", 1)
resolution_width = instance.get("resolutionWidth", 1920)
resolution_height = instance.get("resolutionHeight", 1080)
lut_path = instance.get("lutPath", None)
baked_mov_path = instance.get("bakeRenderPath")
families_data = instance.get("families")
slate_frame = instance.get("slateFrame")
version = instance.get("version")
else:
# Search in directory
@ -156,35 +173,36 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
root = path
self.log.info("Collecting: {}".format(root))
regex = data.get("regex")
if baked_mov_path:
regex = "^{}.*$".format(subset)
if regex:
self.log.info("Using regex: {}".format(regex))
if "slate" in families_data:
frame_start -= 1
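# the slate occupies one extra frame before the real first frame,
# so collect one frame earlier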
collections, remainder = collect(
root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
frame_start=data.get("frameStart"),
frame_end=data.get("frameEnd"),
frame_start=frame_start,
frame_end=frame_end,
)
self.log.info("Found collections: {}".format(collections))
"""
if data.get("subset"):
# If subset is provided for this json then it must be a single
# collection.
if len(collections) > 1:
self.log.error("Forced subset can only work with a single "
"found sequence")
raise RuntimeError("Invalid sequence")
"""
self.log.info("Found remainder: {}".format(remainder))
fps = data.get("fps", 25)
if data.get("user"):
context.data["user"] = data["user"]
if data.get("version"):
version = data.get("version")
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
@ -193,6 +211,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
families.append("ftrack")
if "write" in instance_family:
families.append("write")
if families_data and "slate" in families_data:
families.append("slate")
if data.get("attachTo"):
# we need to attach found collections to existing
@ -213,11 +233,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": data.get("frameStart"),
"frameEnd": data.get("frameEnd"),
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height
})
if "representations" not in instance.data:
@ -242,31 +264,47 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
instance.data["representations"].append(
representation)
elif data.get("subset"):
elif subset:
# if we have subset - add all collections and known
# remainder as representations
# take out review family if mov path
# this will make imagesequence none review
if baked_mov_path:
self.log.info(
"Baked mov is available {}".format(
baked_mov_path))
families.append("review")
if session['AVALON_APP'] == "maya":
families.append("review")
self.log.info(
"Adding representations to subset {}".format(
data.get("subset")))
subset))
instance = context.create_instance(data.get("subset"))
instance = context.create_instance(subset)
data = copy.deepcopy(data)
instance.data.update(
{
"name": data.get("subset"),
"name": subset,
"family": families[0],
"families": list(families),
"subset": data.get("subset"),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": data.get("frameStart"),
"frameEnd": data.get("frameEnd"),
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"slateFrame": slate_frame,
"version": version
}
)
@ -278,31 +316,53 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
ext = collection.tail.lstrip(".")
if "slate" in instance.data["families"]:
frame_start += 1
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
"tags": ["review"] if not baked_mov_path else [],
}
instance.data["representations"].append(
representation)
# filter out only relevant mov in case baked available
self.log.debug("__ remainder {}".format(remainder))
if baked_mov_path:
remainder = [r for r in remainder
if r in baked_mov_path]
self.log.debug("__ remainder {}".format(remainder))
# process remainders
for rem in remainder:
# add only known types to representation
if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
self.log.info(" . {}".format(rem))
if "slate" in instance.data["families"]:
frame_start += 1
tags = ["review"]
if baked_mov_path:
tags.append("delete")
representation = {
"name": rem.split(".")[-1],
"ext": "{}".format(rem.split(".")[-1]),
"files": rem,
"stagingDir": root,
"frameStart": frame_start,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
"tags": tags
}
instance.data["representations"].append(
representation)
@ -344,6 +404,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"version": version
}
)
if lut_path:
@ -365,3 +428,18 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"tags": ["review"],
}
instance.data["representations"].append(representation)
# temporary ... allow only beauty on ftrack
if session['AVALON_APP'] == "maya":
AOV_filter = ['beauty']
for aov in AOV_filter:
if aov not in instance.data['subset']:
instance.data['families'].remove('review')
instance.data['families'].remove('ftrack')
representation["tags"].remove('review')
self.log.debug(
"__ representations {}".format(
instance.data["representations"]))
self.log.debug(
"__ instance.data {}".format(instance.data))

View file

@ -31,32 +31,44 @@ class CollectTemplates(pyblish.api.InstancePlugin):
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
@ -76,7 +88,18 @@ class CollectTemplates(pyblish.api.InstancePlugin):
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
"representation": "TEMP")}
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data

View file

@ -4,6 +4,7 @@ import copy
import pype.api
import pyblish
from pypeapp import config
class ExtractBurnin(pype.api.Extractor):
@ -25,11 +26,8 @@ class ExtractBurnin(pype.api.Extractor):
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
# TODO: expand burnin data list to include all useful keys
version = ''
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])
version = instance.context.data.get(
'version', instance.data.get('version'))
frame_start = int(instance.data.get("frameStart") or 0)
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
@ -41,10 +39,30 @@ class ExtractBurnin(pype.api.Extractor):
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": version,
"comment": instance.context.data.get("comment"),
"intent": instance.context.data.get("intent")
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
prep_data.update(config.get_datetime_data())
slate_frame_start = frame_start
slate_frame_end = frame_end
slate_duration = duration
# exception for slate workflow
if "slate" in instance.data["families"]:
slate_frame_start = frame_start - 1
slate_frame_end = frame_end
slate_duration = slate_frame_end - slate_frame_start + 1
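# exposing slate_* keys lets burnin templates cover the extra head
# frame that the slate occupies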
prep_data.update({
"slate_frame_start": slate_frame_start,
"slate_frame_end": slate_frame_end,
"slate_duration": slate_duration
})
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
@ -63,7 +81,8 @@ class ExtractBurnin(pype.api.Extractor):
filename = "{0}".format(repre["files"])
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
ext = os.path.splitext(filename)[1]
movieFileBurnin = filename.replace(ext, "") + name + ext
full_movie_path = os.path.join(
os.path.normpath(stagingdir), repre["files"]

View file

@ -20,6 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
hosts = ["shell"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
enabled = False
def process(self, instance):
start = instance.data.get("frameStart")
@ -28,51 +29,74 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
input_file = (
collections[0].format('{head}{padding}{tail}') % start
)
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
self.log.info("subset {}".format(instance.data['subset']))
if 'crypto' in instance.data['subset']:
return
filename = collections[0].format('{head}')
if not filename.endswith('.'):
filename += "."
jpegFile = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpegFile)
# get representation and loop them
representations = instance.data["representations"]
self.log.info("output {}".format(full_output_path))
# filter out mov and img sequences
representations_new = representations[:]
config_data = instance.context.data['output_repre_config']
for repre in representations:
self.log.debug(repre)
if 'review' not in repre['tags']:
continue
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
input_file = repre['files'][0]
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start
# )
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
subprocess_jpeg = " ".join(jpeg_items)
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpegFile = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpegFile)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
self.log.info("output {}".format(full_output_path))
if "representations" not in instance.data:
instance.data["representations"] = []
config_data = instance.context.data['output_repre_config']
representation = {
'name': 'jpg',
'ext': 'jpg',
'files': jpegFile,
"stagingDir": stagingdir,
"thumbnail": True
}
instance.data["representations"].append(representation)
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
subprocess_jpeg = " ".join(jpeg_items)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'jpg',
'ext': 'jpg',
'files': jpegFile,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ['thumbnail']
}
# adding representation
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
instance.data["representations"] = representations_new

View file

@ -1,5 +1,4 @@
import os
import math
import pyblish.api
import clique
import pype.api
@ -25,19 +24,21 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext_filter = []
def process(self, instance):
to_width = 1920
to_height = 1080
output_profiles = self.outputs or {}
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("frameStart")
resolution_height = instance.data.get("resolutionHeight", 1080)
resolution_width = instance.data.get("resolutionWidth", 1920)
pixel_aspect = instance.data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(instance.data["families"]))
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(inst_data["families"]))
# get representation and loop them
representations = instance.data["representations"]
representations = inst_data["representations"]
# filter out mov and img sequences
representations_new = representations[:]
@ -45,6 +46,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
@ -56,10 +60,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
if not ext:
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
@ -114,8 +122,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
@ -155,13 +164,43 @@ class ExtractReview(pyblish.api.InstancePlugin):
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb is not 0:
if lb != 0:
ffmpeg_width = to_width
ffmpeg_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
output_args.append(
"-filter:v scale=1920x1080:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
if resolution_ratio != delivery_ratio:
ffmpeg_width = resolution_width
ffmpeg_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpeg_width, ffmpeg_height, lb))
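# The two drawbox filters paint matching black bars at the top and the
# bottom; their height follows from the target ratio encoded in `lb`.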
# In case audio is longer than video.
output_args.append("-shortest")
@ -169,35 +208,56 @@ class ExtractReview(pyblish.api.InstancePlugin):
# output filename
output_args.append(full_output_path)
self.log.debug("__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug("__ resolution_width: `{}`".format(resolution_width))
self.log.debug("__ resolution_height: `{}`".format(resolution_height))
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
width_scale = 1920
width_half_pad = 0
res_w = int(float(resolution_width) * pixel_aspect)
height_half_pad = int((
(res_w - 1920) / (
res_w * .01) * (
1080 * .01)) / 2
)
height_scale = 1080 - (height_half_pad * 2)
if height_scale > 1080:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
height_scale = 1080
width_half_pad = (1920 - (float(resolution_width) * (1080 / float(resolution_height))) ) / 2
width_scale = int(1920 - (width_half_pad * 2))
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug("__ width_scale: `{}`".format(width_scale))
self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug("__ height_scale: `{}`".format(height_scale))
self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad=1920:1080:{2}:{3}:black,setsar=1".format(
width_scale, height_scale, width_half_pad, height_half_pad
)
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
@ -225,7 +285,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug("_ output_args: `{}`".format(output_args))
self.log.debug(
"_ output_args: `{}`".format(output_args))
mov_args = [
os.path.join(
@ -249,7 +310,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if repre_new.get('preview'):
repre_new.pop("preview")

View file

@ -0,0 +1,243 @@
import os
import pype.api
import pyblish
class ExtractReviewSlate(pype.api.Extractor):
"""
Will add a slate frame at the start of the video files.
"""
label = "Review with Slate frame"
order = pyblish.api.ExtractorOrder + 0.031
families = ["slate"]
hosts = ["nuke", "maya", "shell"]
optional = True
def process(self, instance):
inst_data = instance.data
if "representations" not in inst_data:
raise RuntimeError("Burnin needs already created mov to work on.")
suffix = "_slate"
slate_path = inst_data.get("slateFrame")
ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
to_width = 1920
to_height = 1080
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
ext = os.path.splitext(input_file)[1]
output_file = input_file.replace(ext, "") + suffix + ext
input_path = os.path.join(
os.path.normpath(stagingdir), repre["files"])
self.log.debug("__ input_path: {}".format(input_path))
_remove_at_end.append(input_path)
output_path = os.path.join(
os.path.normpath(stagingdir), output_file)
self.log.debug("__ output_path: {}".format(output_path))
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
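# "-loop 1" repeats the still slate image; "-t 0.04" trims the result
# to a single frame, assuming a frame rate around 25 fps (0.04 s).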
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
# make sure colors are correct
output_args.extend([
"-vf scale=out_color_matrix=bt709",
"-color_primaries bt709",
"-color_trc bt709",
"-colorspace bt709"
])
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
_remove_at_end.append(slate_v_path)
slate_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
slate_subprcs_cmd = " ".join(slate_args)
# run slate generation subprocess
self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd))
slate_output = pype.api.subprocess(slate_subprcs_cmd)
self.log.debug("Slate Output: {}".format(slate_output))
# create ffmpeg concat text file path
conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt"
conc_text_path = os.path.join(
os.path.normpath(stagingdir), conc_text_file)
_remove_at_end.append(conc_text_path)
self.log.debug("__ conc_text_path: {}".format(conc_text_path))
new_line = "\n"
with open(conc_text_path, "w") as conc_text_f:
conc_text_f.writelines([
"file {}".format(
slate_v_path.replace("\\", "/")),
new_line,
"file {}".format(input_path.replace("\\", "/"))
])
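# ffmpeg's concat demuxer reads one "file <path>" line per segment and
# joins them in order: slate first, then the original movie.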
# concat slate and videos together
conc_input_args = ["-y", "-f concat", "-safe 0"]
conc_input_args.append("-i {}".format(conc_text_path))
conc_output_args = ["-c copy"]
conc_output_args.append(output_path)
concat_args = [
ffmpeg_path,
" ".join(conc_input_args),
" ".join(conc_output_args)
]
concat_subprcs_cmd = " ".join(concat_args)
# ffmpeg concat subprocess
self.log.debug("Executing concat: {}".format(concat_subprcs_cmd))
concat_output = pype.api.subprocess(concat_subprcs_cmd)
self.log.debug("Output concat: {}".format(concat_output))
self.log.debug("__ repre[tags]: {}".format(repre["tags"]))
repre_update = {
"files": output_file,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
inst_data["representations"][i].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
# removing temp files
for f in _remove_at_end:
os.remove(f)
self.log.debug("Removed: `{}`".format(f))
# Remove any representations tagged for deletion.
for repre in inst_data.get("representations", []):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
inst_data["representations"].remove(repre)
self.log.debug(inst_data["representations"])
def add_video_filter_args(self, args, inserting_arg):
"""
Fix video filter arguments to be one long string.
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all video format settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip them from all flags
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back

View file

@ -84,9 +84,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -94,10 +96,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -318,9 +324,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -82,31 +82,40 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:

View file

@ -7,6 +7,7 @@ import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
@ -154,9 +155,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -164,10 +167,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -176,16 +183,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
# self.log.info("Verifying version from assumed destination")
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# if assumed_version != next_version:
# raise AttributeError("Assumed version 'v{0:03d}' does not match"
# "next version in database "
# "('v{1:03d}')".format(assumed_version,
# next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
@ -271,6 +268,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"version": int(version["name"]),
"hierarchy": hierarchy}
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
@ -324,6 +332,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
repre.get("frameEnd")))
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
@ -358,7 +370,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head,
dst_start_frame,
dst_tail).replace("..", ".")
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
else:
# Single file
@ -387,7 +399,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data["transfers"].append([src, dst])
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
self.log.debug("__ dst: {}".format(dst))
representation = {
@ -415,6 +427,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
}
}
if repre.get("outputName"):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
@ -461,6 +476,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def unc_convert(self, path):
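# Illustrative mapping, assuming studio environment values such as
#   PYPE_STUDIO_PROJECTS_MOUNT = "P:/" and
#   PYPE_STUDIO_PROJECTS_PATH = "//storage/projects/":
# "P:/show/seq" then becomes "//storage/projects/show/seq" whenever the
# drive letter is not mounted on this machine.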
self.log.debug("> __ path: `{}`".format(path))
drive, _path = os.path.splitdrive(path)
self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path))
if not os.path.exists(drive + "/"):
self.log.info("Converting to unc from environments ..")
path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH")
path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
if "/" in path_mount:
path = path.replace(path_mount[0:-1], path_replace)
else:
path = path.replace(path_mount, path_replace)
return path
def copy_file(self, src, dst):
""" Copy given source to destination
@ -470,8 +502,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Returns:
None
"""
src = os.path.normpath(src)
dst = os.path.normpath(dst)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
@ -492,6 +524,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
try:
os.makedirs(dirname)
except OSError as e:
@ -504,9 +540,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
@ -597,7 +635,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
"fps": context.data.get(
"fps", instance.data.get("fps"))}
# Include optional data if present in
optionals = [

View file

@ -88,9 +88,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -98,10 +100,14 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -251,9 +257,6 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
@ -332,9 +335,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -21,20 +21,34 @@ def _get_script():
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
module_path = os.path.normpath(module_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
module_path = module_path.replace(mount_root, network_root)
return module_path
# Logic to retrieve latest files concerning extendFrames
def get_latest_version(asset_name, subset_name, family):
# Get asset
asset_name = io.find_one({"type": "asset",
"name": asset_name},
projection={"name": True})
asset_name = io.find_one(
{
"type": "asset",
"name": asset_name
},
projection={"name": True}
)
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]},
projection={"_id": True, "name": True})
subset = io.find_one(
{
"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]
},
projection={"_id": True, "name": True}
)
# Check if the subset actually exists (pre-run check)
assert subset, "No subsets found, please publish with `extendFrames` off"
@ -45,11 +59,15 @@ def get_latest_version(asset_name, subset_name, family):
"data.endFrame": True,
"parent": True}
version = io.find_one({"type": "version",
"parent": subset["_id"],
"data.families": family},
projection=version_projection,
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"],
"data.families": family
},
projection=version_projection,
sort=[("name", -1)]
)
assert version, "No version found, this is a bug"
@ -143,7 +161,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_ROOT"
"PYPE_ROOT",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
def _submit_deadline_post_job(self, instance, job):
@ -154,7 +174,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
data = instance.data.copy()
subset = data["subset"]
state = data.get("publishJobState", "Suspended")
job_name = "{batch} - {subset} [publish image sequence]".format(
batch=job["Props"]["Name"],
subset=subset
@ -164,6 +183,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
output_dir = instance.data["outputDir"]
metadata_path = os.path.join(output_dir, metadata_filename)
metadata_path = os.path.normpath(metadata_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
network_root = os.path.normpath(
os.environ['PYPE_STUDIO_PROJECTS_PATH'])
metadata_path = metadata_path.replace(mount_root, network_root)
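# the farm workers resolve the network (UNC) root rather than the
# artist's mapped drive, so the metadata path is rewritten here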
# Generate the payload for Deadline submission
payload = {
"JobInfo": {
@ -174,7 +200,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"InitialStatus": state,
"Priority": job["Props"]["Pri"]
},
"PluginInfo": {
@ -192,6 +217,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# job so they use the same environment
environment = job["Props"].get("Env", {})
i = 0
for index, key in enumerate(environment):
self.log.info("KEY: {}".format(key))
@ -307,6 +333,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"user": context.data["user"],
"version": context.data["version"],
"attachTo": attach_subset_versions,
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
# Optional metadata (for debugging)
"metadata": {
"instance": data,
@ -315,6 +343,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
}
if api.Session["AVALON_APP"] == "nuke":
metadata['subset'] = subset
if submission_type == "muster":
ftrack = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),

View file

@ -116,9 +116,11 @@ class LookLoader(pype.maya.plugin.ReferenceLoader):
shapes=True))
nodes = set(nodes_list)
json_representation = io.find_one({"type": "representation",
"parent": representation['parent'],
"name": "json"})
json_representation = io.find_one({
"type": "representation",
"parent": representation['parent'],
"name": "json"
})
# Load relationships
shader_relation = api.get_representation_path(json_representation)

View file

@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin):
objsets = instance.data['setMembers']
for objset in objsets:
objset = str(objset)
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if objset == "content_SET":
if "content_SET" in objset:
instance.data['setMembers'] = members
elif objset == "proxy_SET":
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
self.log.debug("data: {}".format(instance.data))

View file

@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
texture_filenames = []
if image_search_paths:
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
image_search_paths.split(os.path.pathsep) if p]
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
image_search_paths = self._replace_tokens(image_search_paths)
# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
self.log.info("Found %i texture(s)" % len(texture_filenames))
@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
"atttribute'" % node)
# Collect all texture files
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
texture_filenames = self._replace_tokens(texture_filenames)
for texture in texture_filenames:
files = []
@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
collection, remainder = clique.assemble(files, patterns=pattern)
return collection
def _replace_tokens(self, strings):
env_re = re.compile(r"\$\{(\w+)\}")
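# e.g. "${PROJECT_ROOT}/tex" -> "/mnt/projects/tex" when PROJECT_ROOT
# is set in the environment (illustrative values)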
replaced = []
for s in strings:
matches = re.finditer(env_re, s)
for m in matches:
try:
s = s.replace(m.group(), os.environ[m.group(1)])
except KeyError:
msg = "Cannot find requested {} in environment".format(
m.group(1))
self.log.error(msg)
raise RuntimeError(msg)
replaced.append(s)
return replaced

View file

@ -17,6 +17,7 @@ class ExtractAssStandin(pype.api.Extractor):
label = "Ass Standin (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
@ -47,7 +48,7 @@ class ExtractAssStandin(pype.api.Extractor):
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=self.asciiAss,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
@ -59,13 +60,15 @@ class ExtractAssStandin(pype.api.Extractor):
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
self.log.info("Extracting ass")
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=False,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
self.log.info("Extracted {}".format(filename))
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",

View file

@ -429,33 +429,42 @@ class ExtractLook(pype.api.Extractor):
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True},
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one(
{"type": "asset", "name": asset_name, "parent": project["_id"]}
)
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one(
{"type": "subset", "name": subset_name, "parent": asset["_id"]}
)
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)]
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version

View file

@ -38,9 +38,13 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin):
invalid = list()
asset = instance.data['asset']
asset_data = io.find_one({"name": asset,
"type": "asset"},
projection={"_id": True})
asset_data = io.find_one(
{
"name": asset,
"type": "asset"
},
projection={"_id": True}
)
asset_id = str(asset_data['_id'])
# We do want to check the referenced nodes as it might be

View file

@ -49,9 +49,10 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin):
"""Check if subset is registered in the database under the asset"""
asset = io.find_one({"type": "asset", "name": asset_name})
is_valid = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
is_valid = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
return is_valid

View file

@ -6,9 +6,6 @@ from pype import api as pype
import nuke
log = pype.Logger().get_logger(__name__, "nuke")
class CrateRead(avalon.nuke.Creator):
# change this to template preset
name = "ReadCopy"

View file

@ -1,8 +0,0 @@
# create publishable read node usually used for enabling version tracking
# also useful for sharing across shots or assets
# if read nodes are selected it will convert them to centainer
# if no read node selected it will create read node and offer browser to shot resource folder
# type movie > mov or imagesequence
# type still > matpaint .psd, .tif, .png,

View file

@ -1,22 +1,14 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
from pype import api as pype
from pype.nuke import plugin
from pypeapp import config
import nuke
log = pype.Logger().get_logger(__name__, "nuke")
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
nClass = "write"
n_class = "write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
@ -27,7 +19,7 @@ class CreateWriteRender(plugin.PypeCreator):
data = OrderedDict()
data["family"] = self.family
data["families"] = self.nClass
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
@ -35,7 +27,100 @@ class CreateWriteRender(plugin.PypeCreator):
self.data = data
self.nodes = nuke.selectedNodes()
self.log.info("self.data: '{}'".format(self.data))
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
assert len(nodes) == 1, self.log.error(
"Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node)
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
return write_node
class CreateWritePrerender(plugin.PypeCreator):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
n_class = "write"
family = "prerender"
icon = "sign-out"
defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"]
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
@ -70,7 +155,7 @@ class CreateWriteRender(plugin.PypeCreator):
# recreate new
write_data = {
"class": self.nClass,
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
@ -83,12 +168,13 @@ class CreateWriteRender(plugin.PypeCreator):
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
"fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node)
input=selected_node,
prenodes=[])
# relinking to collected connections
for i, input in enumerate(inputs):
@ -99,77 +185,27 @@ class CreateWriteRender(plugin.PypeCreator):
for output in outputs:
output.setInput(0, write_node)
return write_node
# open group node
write_node.begin()
for n in nuke.allNodes():
# get write node
if n.Class() in "Write":
w_node = n
write_node.end()
#
# class CreateWritePrerender(avalon.nuke.Creator):
# # change this to template preset
# preset = "prerender"
#
# name = "WritePrerender"
# label = "Create Write Prerender"
# hosts = ["nuke"]
# family = "{}_write".format(preset)
# families = preset
# icon = "sign-out"
# defaults = ["Main", "Mask"]
#
# def __init__(self, *args, **kwargs):
# super(CreateWritePrerender, self).__init__(*args, **kwargs)
# self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
# self.__class__.__name__, {}
# )
#
# data = OrderedDict()
#
# data["family"] = self.family.split("_")[1]
# data["families"] = self.families
#
# {data.update({k: v}) for k, v in self.data.items()
# if k not in data.keys()}
# self.data = data
#
# def process(self):
# self.name = self.data["subset"]
#
# instance = nuke.toNode(self.data["subset"])
# node = 'write'
#
# if not instance:
# write_data = {
# "class": node,
# "preset": self.preset,
# "avalon": self.data
# }
#
# if self.presets.get('fpath_template'):
# self.log.info("Adding template path from preset")
# write_data.update(
# {"fpath_template": self.presets["fpath_template"]}
# )
# else:
# self.log.info("Adding template path from plugin")
# write_data.update({
# "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
#
# # get group node
# group_node = create_write_node(self.data["subset"], write_data)
#
# # open group node
# group_node.begin()
# for n in nuke.allNodes():
# # get write node
# if n.Class() in "Write":
# write_node = n
# group_node.end()
#
# # linking knobs to group property panel
# linking_knobs = ["first", "last", "use_limit"]
# for k in linking_knobs:
# lnk = nuke.Link_Knob(k)
# lnk.makeLink(write_node.name(), k)
# lnk.setName(k.replace('_', ' ').capitalize())
# lnk.clearFlag(nuke.STARTLINE)
# group_node.addKnob(lnk)
#
# return
# add inner write node Tab
write_node.addKnob(nuke.Tab_Knob("WriteLinkedKnobs"))
# linking knobs to group property panel
linking_knobs = ["channels", "___", "first", "last", "use_limit"]
for k in linking_knobs:
if "___" in k:
write_node.addKnob(nuke.Text_Knob(''))
else:
lnk = nuke.Link_Knob(k)
lnk.makeLink(w_node.name(), k)
lnk.setName(k.replace('_', ' ').capitalize())
lnk.clearFlag(nuke.STARTLINE)
write_node.addKnob(lnk)
return write_node
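# Note on the block above: nuke.Link_Knob mirrors a knob of the inner
# Write node on the group's own property panel. A minimal sketch of the
# mechanism (node and knob names illustrative):
#
#   lnk = nuke.Link_Knob("first")
#   lnk.makeLink("Write1", "first")  # link to the inner node's knob
#   group_node.addKnob(lnk)          # shown as "First" on the group panel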

View file

@ -0,0 +1,319 @@
from avalon import api, style, io
import nuke
import nukescripts
from pype.nuke import lib as pnlib
from avalon.nuke import lib as anlib
from avalon.nuke import containerise, update_container
reload(pnlib)
class LoadBackdropNodes(api.Loader):
"""Loading Published Backdrop nodes (workfile, nukenodes)"""
representations = ["nk"]
families = ["workfile", "nukenodes"]
label = "Iport Nuke Nodes"
order = 0
icon = "eye"
color = style.colors.light
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
"""
Loading function to import a .nk file into the script and wrap
it in a backdrop
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute, not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
# Get mouse position
n = nuke.createNode("NoOp")
xcursor, ycursor = (n.xpos(), n.ypos())
anlib.reset_selection()
nuke.delete(n)
bdn_frame = 50
with anlib.maintained_selection():
# add group from nk
nuke.nodePaste(file)
# get all pasted nodes
new_nodes = list()
nodes = nuke.selectedNodes()
# get pointer position in DAG
xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame)
# reset position to all nodes and replace inputs and output
for n in nodes:
anlib.reset_selection()
xpos = (n.xpos() - xcursor) + xpointer
ypos = (n.ypos() - ycursor) + ypointer
n.setXYpos(xpos, ypos)
# replace Input nodes with dots
if n.Class() in "Input":
dot = nuke.createNode("Dot")
new_name = n.name().replace("INP", "DOT")
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = n.dependent()
for d in dep:
index = next((i for i, dpcy in enumerate(
d.dependencies())
if n is dpcy), 0)
d.setInput(index, dot)
# remove Input node
anlib.reset_selection()
nuke.delete(n)
continue
# replace the Output node with a dot
elif n.Class() in "Output":
dot = nuke.createNode("Dot")
new_name = n.name() + "_DOT"
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = next((d for d in n.dependencies()), None)
if dep:
dot.setInput(0, dep)
# remove Output node
anlib.reset_selection()
nuke.delete(n)
continue
else:
new_nodes.append(n)
# reselect nodes with new Dot instead of Inputs and Output
anlib.reset_selection()
anlib.select_nodes(new_nodes)
# place on backdrop
bdn = nukescripts.autoBackdrop()
# add frame offset
xpos = bdn.xpos() - bdn_frame
ypos = bdn.ypos() - bdn_frame
bdwidth = bdn["bdwidth"].value() + (bdn_frame*2)
bdheight = bdn["bdheight"].value() + (bdn_frame*2)
bdn["xpos"].setValue(xpos)
bdn["ypos"].setValue(ypos)
bdn["bdwidth"].setValue(bdwidth)
bdn["bdheight"].setValue(bdheight)
bdn["name"].setValue(object_name)
bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name))
bdn["note_font_size"].setValue(20)
return containerise(
node=bdn,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
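# containerise() (from avalon.nuke) imprints the data above onto the
# backdrop node so the Scene Inventory can later find, version-check and
# update it; the exact knob layout is avalon's convention, not defined in
# this diff.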
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
context = representation["context"]
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
with anlib.maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = anlib.get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
anlib.set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd88467ff", 16))
else:
GN["tile_color"].setValue(int(self.node_color, 16))
self.log.info("udated to version: {}".format(version.get("name")))
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
Finds the active viewer and places the node under it,
also adds the name of the group into the viewer's Input Process
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
self.log.error("Please create Viewer node before you "
"run this action again")
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
pnlib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
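# Effect sketch (standard Nuke behaviour, not defined in this diff): once
# "input_process_node" names the loaded group, everything displayed in
# Viewer1 is routed through that group before being shown, which is how a
# published viewer process gets applied on top of the viewed image.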
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Recursively converts unicode strings to byte strings
It walks through the whole dictionary
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
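# Usage sketch (Python 2, values illustrative):
#   byteify({u"asset": u"sh010", u"tags": [u"review"]})
#   # -> {"asset": "sh010", "tags": ["review"]}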
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -13,8 +13,10 @@ class CollectAssetInfo(pyblish.api.ContextPlugin):
]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.info("asset_data: {}".format(asset_data))
context.data['handles'] = int(asset_data["data"].get("handles", 0))

View file

@ -15,9 +15,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
hosts = ["nuke", "nukeassist"]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []

View file

@ -0,0 +1,22 @@
import os
import pype.api as pype
import pyblish.api
class CollectScriptVersion(pyblish.api.ContextPlugin):
"""Collect Script Version."""
order = pyblish.api.CollectorOrder
label = "Collect Script Version"
hosts = [
"nuke",
"nukeassist"
]
def process(self, context):
file_path = context.data["currentFile"]
base_name = os.path.basename(file_path)
# get version string
version = pype.get_version_from_path(base_name)
context.data['version'] = version
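# Illustrative example (file name hypothetical): for a workfile saved as
# "sh010_compositing_v012.nk", get_version_from_path() would be expected
# to return "012", which later plugins read back from
# context.data["version"].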

View file

@ -0,0 +1,40 @@
import pyblish.api
import nuke
class CollectSlate(pyblish.api.InstancePlugin):
"""Check if SLATE node is in scene and connected to rendering tree"""
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["write"]
def process(self, instance):
node = instance[0]
slate = next((n for n in nuke.allNodes()
if "slate" in n.name().lower()
if not n["disable"].getValue()),
None)
if slate:
# check if slate node is connected to write node tree
slate_check = 0
slate_node = None
while slate_check == 0:
try:
node = node.dependencies()[0]
if slate.name() in node.name():
slate_node = node
slate_check = 1
except IndexError:
break
if slate_node:
instance.data["slateNode"] = slate_node
instance.data["families"].append("slate")
self.log.info(
"Slate node is in node graph: `{}`".format(slate.name()))
self.log.debug(
"__ instance: `{}`".format(instance))

View file

@ -2,8 +2,6 @@ import nuke
import pyblish.api
import os
import pype.api as pype
from avalon.nuke import (
get_avalon_knob_data,
add_publish_knob
@ -11,7 +9,7 @@ from avalon.nuke import (
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Publish current script version."""
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
@ -31,9 +29,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
base_name = os.path.basename(file_path)
subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family)
# get version string
version = pype.get_version_from_path(base_name)
# Get frame range
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
@ -53,7 +48,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"resolutionWidth": resolution_width,

View file

@ -50,9 +50,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version
version = pype.get_version_from_path(nuke.root().name())
instance.data['version'] = version
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data['version'])
# create label
@ -94,12 +95,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(version),
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": [instance.data["family"]],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
group_node = [x for x in instance if x.Class() == "Group"][0]
deadlineChunkSize = 1
@ -129,5 +131,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"subsetGroup": "renders"
})
self.log.debug("instance.data: {}".format(instance.data))

View file

@ -28,6 +28,11 @@ class NukeRenderLocal(pype.api.Extractor):
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("frameStart", None)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame -= 1
last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
@ -47,6 +52,10 @@ class NukeRenderLocal(pype.api.Extractor):
int(last_frame)
)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame += 1
path = node['file'].value()
out_dir = os.path.dirname(path)
ext = node["file_type"].value()

View file

@ -6,7 +6,7 @@ import pype
reload(pnlib)
class ExtractReviewLutData(pype.api.Extractor):
class ExtractReviewDataLut(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
@ -37,8 +37,9 @@ class ExtractReviewLutData(pype.api.Extractor):
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
# generate data
with anlib.maintained_selection():
exporter = pnlib.Exporter_review_lut(
exporter = pnlib.ExporterReviewLut(
self, instance
)
data = exporter.generate_lut()

View file

@ -0,0 +1,62 @@
import os
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import pype
reload(pnlib)
class ExtractReviewDataMov(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data Mov"
families = ["review", "render", "render.local"]
hosts = ["nuke"]
def process(self, instance):
families = instance.data["families"]
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
# generate data
with anlib.maintained_selection():
exporter = pnlib.ExporterReviewMov(
self, instance)
if "render.farm" in families:
instance.data["families"].remove("review")
instance.data["families"].remove("ftrack")
data = exporter.generate_mov(farm=True)
self.log.debug(
"_ data: {}".format(data))
instance.data.update({
"bakeRenderPath": data.get("bakeRenderPath"),
"bakeScriptPath": data.get("bakeScriptPath"),
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
data = exporter.generate_mov()
# assign to representations
instance.data["representations"] += data["representations"]
self.log.debug(
"_ representations: {}".format(instance.data["representations"]))

View file

@ -0,0 +1,154 @@
import os
import nuke
from avalon.nuke import lib as anlib
import pyblish.api
import pype
class ExtractSlateFrame(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Slate Frame"
families = ["slate"]
hosts = ["nuke"]
def process(self, instance):
if not hasattr(self, "viewer_lut_raw"):
self.viewer_lut_raw = False
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
self.render_slate(instance)
def render_slate(self, instance):
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
temporary_nodes = []
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes) - 1
if "slate" in instance.data["families"]:
first_frame += 1
last_frame = first_frame
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None) - 1
last_frame = first_frame
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
previous_node = node
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
if not self.viewer_lut_raw:
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
file = fhead + "slate.png"
path = os.path.join(staging_dir, file).replace("\\", "/")
instance.data["slateFrame"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("png")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
# fill slate node with comments
self.add_comment_slate_node(instance)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug(
"slate frame path: {}".format(instance.data["slateFrame"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn
def add_comment_slate_node(self, instance):
node = instance.data.get("slateNode")
if not node:
return
comment = instance.context.data.get("comment")
intent = instance.context.data.get("intent")
try:
node["f_submission_note"].setValue(comment)
node["f_submitting_for"].setValue(intent)
except NameError:
return
instance.data.pop("slateNode")

View file

@ -28,19 +28,16 @@ class ExtractThumbnail(pype.api.Extractor):
self.render_thumbnail(instance)
def render_thumbnail(self, instance):
node = instance[0] # group node
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" in instance.data:
staging_dir = instance.data[
"representations"][0]["stagingDir"].replace("\\", "/")
instance.data["stagingDir"] = staging_dir
instance.data["representations"][0]["tags"] = ["review"]
else:
instance.data["representations"] = []
# get output path
render_path = instance.data['path']
staging_dir = os.path.normpath(os.path.dirname(render_path))
instance.data["stagingDir"] = staging_dir
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
@ -165,7 +162,7 @@ class ExtractThumbnail(pype.api.Extractor):
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')

View file

@ -1,7 +1,7 @@
import os
import json
import getpass
from avalon import api
from avalon.vendor import requests
import re
@ -26,31 +26,69 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def process(self, instance):
node = instance[0]
# for x in instance:
# if x.Class() == "Write":
# node = x
#
# if node is None:
# return
context = instance.context
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
context = instance.context
self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL)
self._comment = context.data.get("comment", "")
self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
self._deadline_user = context.data.get(
"deadlineUser", getpass.getuser())
self._frame_start = int(instance.data["frameStart"])
self._frame_end = int(instance.data["frameEnd"])
# get output path
render_path = instance.data['path']
render_dir = os.path.normpath(os.path.dirname(render_path))
script_path = context.data["currentFile"]
script_name = os.path.basename(script_path)
comment = context.data.get("comment", "")
# exception for slate workflow
if "slate" in instance.data["families"]:
self._frame_start -= 1
deadline_user = context.data.get("deadlineUser", getpass.getuser())
response = self.payload_submit(instance,
script_path,
render_path,
node.name()
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["publishJobState"] = "Active"
if instance.data.get("bakeScriptPath"):
render_path = instance.data.get("bakeRenderPath")
script_path = instance.data.get("bakeScriptPath")
exe_node_name = instance.data.get("bakeWriteNodeName")
# exception for slate workflow
if "slate" in instance.data["families"]:
self._frame_start += 1
resp = self.payload_submit(instance,
script_path,
render_path,
exe_node_name,
response.json()
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = resp.json()
instance.data["publishJobState"] = "Suspended"
def payload_submit(self,
instance,
script_path,
render_path,
exe_node_name,
response_data=None
):
render_dir = os.path.normpath(os.path.dirname(render_path))
script_name = os.path.basename(script_path)
jobname = "%s - %s" % (script_name, instance.name)
ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
if not response_data:
response_data = {}
try:
# Ensure render folder exists
@ -58,10 +96,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
except OSError:
pass
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
@ -71,21 +105,20 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Name": jobname,
# Arbitrary username, for visualisation in Monitor
"UserName": deadline_user,
"UserName": self._deadline_user,
"Priority": instance.data["deadlinePriority"],
"Pool": "2d",
"SecondaryPool": "2d",
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
start=self._frame_start,
end=self._frame_end
),
"ChunkSize": instance.data["deadlineChunkSize"],
"Priority": instance.data["deadlinePriority"],
"Comment": self._comment,
"Comment": comment,
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
# "OutputFilename0": output_filename_0.replace("\\", "/"),
},
"PluginInfo": {
# Input
@ -96,27 +129,29 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# "OutputFilePrefix": render_variables["filename_prefix"],
# Mandatory for Deadline
"Version": ver.group(),
"Version": self._ver.group(),
# Resolve relative references
"ProjectPath": script_path,
"AWSAssetFile0": render_path,
# Only the specific write node is rendered.
"WriteNode": node.name()
"WriteNode": exe_node_name
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
if response_data.get("_id"):
payload["JobInfo"].update({
"JobType": "Normal",
"BatchName": response_data["Props"]["Batch"],
"JobDependency0": response_data["_id"],
"ChunkSize": 99999999
})
# Include critical environment variables with submission
keys = [
# This will trigger `userSetup.py` on the slave
# such that proper initialisation happens the same
# way as it does on a local machine.
# TODO(marcus): This won't work if the slaves don't
# have access to these paths, such as if slaves are
# running Linux and the submitter is on Windows.
"PYTHONPATH",
"PATH",
"AVALON_SCHEMA",
@ -162,11 +197,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
if key == "PYTHONPATH":
clean_path = clean_path.replace('python2', 'python3')
clean_path = clean_path.replace(
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
clean_environment[key] = clean_path
environment = clean_environment
@ -181,20 +217,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
self.preflight_check(instance)
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
response = requests.post(url, json=payload)
response = requests.post(self.deadline_url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["publishJobState"] = "Active"
return response
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""

View file

@ -75,6 +75,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
self.log.info(
'len(collection.indexes): {}'.format(collected_frames_len)
)
if "slate" in instance.data["families"]:
collected_frames_len -= 1
assert (collected_frames_len == frame_length), (
"{} missing frames. Use repair to render all frames"

View file

@ -8,24 +8,31 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
"""Ensure knobs are consistent.
Knobs to validate and their values come from the
"nuke/knobs.json" preset, which needs this structure:
{
"family": {
"knob_name": knob_value
}
}
Example for presets in config:
"presets/plugins/nuke/publish.json" preset, which needs this structure:
"ValidateNukeWriteKnobs": {
"enabled": true,
"knobs": {
"family": {
"knob_name": knob_value
}
}
}
"""
order = pyblish.api.ValidatorOrder
label = "Knobs"
label = "Validate Write Knobs"
hosts = ["nuke"]
actions = [pype.api.RepairContextAction]
optional = True
def process(self, context):
# Check for preset existence.
if not context.data["presets"]["nuke"].get("knobs"):
if not getattr(self, "knobs"):
return
self.log.debug("__ self.knobs: {}".format(self.knobs))
invalid = self.get_invalid(context, compute=True)
if invalid:
@ -43,7 +50,6 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
@classmethod
def get_invalid_knobs(cls, context):
presets = context.data["presets"]["nuke"]["knobs"]
invalid_knobs = []
for instance in context:
# Filter publishable instances.
@ -53,15 +59,15 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
# Filter families.
families = [instance.data["family"]]
families += instance.data.get("families", [])
families = list(set(families) & set(presets.keys()))
families = list(set(families) & set(cls.knobs.keys()))
if not families:
continue
# Get all knobs to validate.
knobs = {}
for family in families:
for preset in presets[family]:
knobs.update({preset: presets[family][preset]})
for preset in cls.knobs[family]:
knobs.update({preset: cls.knobs[family][preset]})
# Get invalid knobs.
nodes = []

View file

@ -169,32 +169,44 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be a version
if version is not None:

View file

@ -3,6 +3,7 @@ from avalon import io
from pype.action import get_errored_instances_from_context
import pype.api as pype
@pyblish.api.log
class RepairNukestudioVersionUp(pyblish.api.Action):
label = "Version Up Workfile"
@ -53,13 +54,17 @@ class ValidateVersion(pyblish.api.InstancePlugin):
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": subset_name})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": subset_name
})
version_db = io.find_one({
'type': 'version',

View file

@ -77,32 +77,44 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be a version
if version is not None:

View file

@ -170,8 +170,10 @@ def switch(asset_name, filepath=None, new=True):
assert asset, "Could not find '%s' in the database" % asset_name
# Get current project
self._project = io.find_one({"type": "project",
"name": api.Session["AVALON_PROJECT"]})
self._project = io.find_one({
"type": "project",
"name": api.Session["AVALON_PROJECT"]
})
# Go to comp
if not filepath:

View file

@ -39,6 +39,25 @@ def _streams(source):
return json.loads(out)['streams']
def get_fps(str_value):
if str_value == "0/0":
print("Source has \"r_frame_rate\" value set to \"0/0\".")
return "Unknown"
items = str_value.split("/")
if len(items) == 1:
fps = float(items[0])
elif len(items) == 2:
fps = float(items[0]) / float(items[1])
else:
# unexpected rate format
return "Unknown"
# check if fps is an integer or float number
if int(fps) == fps:
fps = int(fps)
return str(fps)
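# Expected behaviour (values illustrative):
#   get_fps("25/1")        -> "25"
#   get_fps("24000/1001")  -> "23.976023976023978"
#   get_fps("0/0")         -> "Unknown"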
class ModifiedBurnins(ffmpeg_burnins.Burnins):
'''
This is modification of OTIO FFmpeg Burnin adapter.
@ -95,6 +114,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
streams = _streams(source)
super().__init__(source, streams)
if options_init:
self.options_init.update(options_init)
@ -139,12 +159,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
options['frame_offset'] = start_frame
expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
_text = str(int(self.end_frame + options['frame_offset']))
if text and isinstance(text, str):
text = r"{}".format(text)
expr = text.replace("{current_frame}", expr)
text = text.replace("{current_frame}", _text)
options['expression'] = expr
text = str(int(self.end_frame + options['frame_offset']))
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
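# Expression sketch: with options['frame_offset'] == 1001 the pattern above
# expands to the ffmpeg drawtext expression '%{eif\:n+1001\:d}', where n is
# ffmpeg's zero-based frame index, so the burnin shows absolute frame
# numbers; a '{current_frame}' placeholder in preset text is swapped for
# that expression.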
def add_timecode(self, align, options=None, start_frame=None):
@ -328,6 +349,17 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
frame_start = data.get("frame_start")
frame_start_tc = data.get('frame_start_tc', frame_start)
stream = burnin._streams[0]
if "resolution_width" not in data:
data["resolution_width"] = stream.get("width", "Unknown")
if "resolution_height" not in data:
data["resolution_height"] = stream.get("height", "Unknown")
if "fps" not in data:
data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
for align_text, preset in presets.get('burnins', {}).items():
align = None
if align_text == 'TOP_LEFT':
@ -382,12 +414,14 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
elif bi_func == 'timecode':
burnin.add_timecode(align, start_frame=frame_start_tc)
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
elif bi_func == "datetime":
date_format = preset["format"]
burnin.add_datetime(date_format, align)
@ -414,4 +448,4 @@ if __name__ == '__main__':
data['codec'],
data['output'],
data['burnin_data']
)
)

View file

@ -462,8 +462,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
# Check whether the conversion can be done by the Loader.
# They *must* use the same asset, subset and Loader for
# `api.update` to make sense.
old = io.find_one({"_id": io.ObjectId(representation_current)})
new = io.find_one({"_id": io.ObjectId(representation_new)})
old = io.find_one({
"_id": io.ObjectId(representation_current)
})
new = io.find_one({
"_id": io.ObjectId(representation_new)
})
is_valid = compare_representations(old=old, new=new)
if not is_valid:
log.error("Skipping: %s. See log for details.",

BIN
res/app_icons/blender.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

View file

@ -0,0 +1,34 @@
<?xml version="1.0" ?>
<svg id="Icons" version="1.1" viewBox="0 0 512 512" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g>
<linearGradient gradientUnits="userSpaceOnUse" id="SVGID_1_" x1="-0.0000027" x2="512" y1="256" y2="256">
<stop offset="0" style="stop-color:#00b300"/>
<stop offset="1" style="stop-color:#006600"/>
</linearGradient>
<circle cx="256" cy="256" fill="url(#SVGID_1_)" r="256"/>
<linearGradient gradientUnits="userSpaceOnUse" id="SVGID_2_" x1="42.6666641" x2="469.3333435" y1="256.0005188" y2="256.0005188">
<stop offset="0" style="stop-color:#006600"/>
<stop offset="1" style="stop-color:#00b300"/>
</linearGradient>
<path d="M256,469.3338623c-117.6314697,0-213.3333435-95.7023926-213.3333435-213.3333435 c0-117.6314545,95.7018661-213.333313,213.3333435-213.333313c117.6357422,0,213.3333435,95.7018661,213.3333435,213.333313 C469.3333435,373.6314697,373.6357422,469.3338623,256,469.3338623z" fill="url(#SVGID_2_)"/>
</g>
<g transform="
translate(120, 120)
scale(9)
">
<style type="text/css">
.st0{fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:10;}
.st1{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;}
.st2{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;}
.st3{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-miterlimit:10;}
.st4{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:3;}
</style>
<polyline class="st1" points="2,8 19,8 19,23 13,23 "/>
<circle class="st1" cx="24" cy="23" r="2"/>
<circle class="st1" cx="8" cy="23" r="2"/>
<polyline class="st1" points="19,23 19,12 25,12 29,17 29,23 26,23 "/>
<line class="st1" x1="4" x2="13" y1="12" y2="12"/>
<line class="st1" x1="2" x2="11" y1="16" y2="16"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2 KiB

3
setup/blender/init.py Normal file
View file

@ -0,0 +1,3 @@
from pype import blender
blender.install()

View file

@ -0,0 +1 @@
import knob_scripter

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 KiB

File diff suppressed because it is too large

View file

@ -0,0 +1,2 @@
# default write mov
nuke.knobDefault('Write.mov.colorspace', 'sRGB')
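# Behaviour sketch: knobDefault sets a class-level default, so every newly
# created Write node switched to the "mov" file type starts with its
# colorspace knob set to "sRGB"; existing nodes are unaffected.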

View file

@ -1,4 +1,7 @@
import os
import sys
import atom_server
import KnobScripter
from pype.nuke.lib import (
writes_version_sync,
@ -16,6 +19,6 @@ log = Logger().get_logger(__name__, "nuke")
nuke.addOnScriptSave(onScriptLoad)
nuke.addOnScriptLoad(checkInventoryVersions)
nuke.addOnScriptSave(checkInventoryVersions)
nuke.addOnScriptSave(writes_version_sync)
# nuke.addOnScriptSave(writes_version_sync)
log.info('Automatic syncing of write file knob to script version')