Merge branch 'release/2.5'

Milan Kolar, 2020-02-10 22:49:18 +01:00
commit ee3115ff2b
173 changed files with 10637 additions and 1880 deletions


@ -9,7 +9,7 @@ from pypeapp import config
import logging
log = logging.getLogger(__name__)
__version__ = "2.3.0"
__version__ = "2.5.0"
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")

pype/blender/__init__.py Normal file (34 lines)

@ -0,0 +1,34 @@
import logging
from pathlib import Path
import os
import bpy
from avalon import api as avalon
from pyblish import api as pyblish
from .plugin import AssetLoader
logger = logging.getLogger("pype.blender")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")
def install():
"""Install Blender configuration for Avalon."""
pyblish.register_plugin_path(str(PUBLISH_PATH))
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
def uninstall():
"""Uninstall Blender configuration for Avalon."""
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
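A minimal bootstrap sketch for this host configuration, assuming avalon.api.install() accepts the module as a host and invokes its install() the way other pype hosts are registered:
# Hypothetical Blender startup snippet (assumption, not taken from this changeset).
from avalon import api as avalon

import pype.blender

# Expected to call pype.blender.install() and register the publish/load/create plugin paths.
avalon.install(pype.blender)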

pype/blender/action.py Normal file (47 lines)

@ -0,0 +1,47 @@
import bpy
import pyblish.api
from ..action import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doens't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]
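For context, a hedged sketch of a validator that this action could be attached to; the family and the empty-object rule are illustrative assumptions only:
import bpy
import pyblish.api

from pype.blender.action import SelectInvalidAction  # module path taken from this changeset


class ValidateNoEmptyObjects(pyblish.api.InstancePlugin):
    """Illustrative validator exposing `get_invalid` for SelectInvalidAction."""

    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    families = ["model"]  # assumed family name
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        # Treat empty objects inside the instance as invalid (illustrative rule).
        return [obj for obj in instance
                if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY']

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            names = ", ".join(obj.name for obj in invalid)
            raise RuntimeError("Instance contains empty objects: {}".format(names))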

pype/blender/plugin.py Normal file (135 lines)

@ -0,0 +1,135 @@
"""Shared functionality for pipeline plugins for Blender."""
from pathlib import Path
from typing import Dict, List, Optional
import bpy
from avalon import api
VALID_EXTENSIONS = [".blend"]
def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
"""Return a consistent name for a model asset."""
name = f"{asset}_{subset}"
if namespace:
name = f"{namespace}:{name}"
return name
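A quick usage sketch of model_name (values are illustrative):
# model_name("chair", "modelMain")              -> "chair_modelMain"
# model_name("chair", "modelMain", "chair_01")  -> "chair_01:chair_modelMain"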
class AssetLoader(api.Loader):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
into another Blender scene.
The `update` method should be implemented by a sub-class, because
it's different for different types (e.g. model, rig, animation,
etc.).
"""
@staticmethod
def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
"""Get the 'instance empty' that holds the collection instance."""
for node in nodes:
if not isinstance(node, bpy.types.Object):
continue
if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
and node.instance_collection and node.name == instance_name):
return node
return None
@staticmethod
def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
"""Get the 'instance collection' (container) for this asset."""
for node in nodes:
if not isinstance(node, bpy.types.Collection):
continue
if node.name == instance_name:
return node
return None
@staticmethod
def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
"""Find the library file from the container.
It traverses the objects from this collection, checks if there is only
1 library from which the objects come and returns that library.
Warning:
No nested collections are supported at the moment!
"""
assert not container.children, "Nested collections are not supported."
assert container.objects, "The collection doesn't contain any objects."
libraries = set()
for obj in container.objects:
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
assert len(libraries) == 1, f"'{container.name}' contains objects from more than one library."
return list(libraries)[0]
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
context: Full parenthood of representation to load
name: Use pre-defined name
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO (jasper): make it possible to add the asset several times by
# just re-using the collection
assert Path(self.fname).exists(), f"{self.fname} doesn't exist."
self.process_asset(
context=context,
name=name,
namespace=namespace,
options=options,
)
# Only containerise if anything was loaded by the Loader.
nodes = self[:]
if not nodes:
return None
# Only containerise if it's not already a collection from a .blend file.
representation = context["representation"]["name"]
if representation != "blend":
from avalon.blender.pipeline import containerise
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__,
)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
instance_name = model_name(asset, subset, namespace)
return self._get_instance_collection(instance_name, nodes)
def update(self, container: Dict, representation: Dict):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")


@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction):
# Filter event even more (skip task entities)
# - task entities are not relevant for avalon
entity_mapping = {}
for entity in entities:
ftrack_id = entity["id"]
if ftrack_id not in ftrack_ids:
@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction):
if entity.entity_type.lower() == "task":
ftrack_ids.remove(ftrack_id)
entity_mapping[ftrack_id] = entity
if not ftrack_ids:
# It is a bug if this happens!
return {
@ -122,11 +125,41 @@ class DeleteAssetSubset(BaseAction):
project_name = project["full_name"]
self.dbcon.Session["AVALON_PROJECT"] = project_name
selected_av_entities = self.dbcon.find({
selected_av_entities = list(self.dbcon.find({
"type": "asset",
"data.ftrackId": {"$in": ftrack_ids}
})
selected_av_entities = [ent for ent in selected_av_entities]
}))
found_without_ftrack_id = {}
if len(selected_av_entities) != len(ftrack_ids):
found_ftrack_ids = [
ent["data"]["ftrackId"] for ent in selected_av_entities
]
for ftrack_id, entity in entity_mapping.items():
if ftrack_id in found_ftrack_ids:
continue
av_ents_by_name = list(self.dbcon.find({
"type": "asset",
"name": entity["name"]
}))
if not av_ents_by_name:
continue
ent_path_items = [ent["name"] for ent in entity["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
# TODO we should tell the user that
# a few of them are missing in avalon
for av_ent in av_ents_by_name:
if av_ent["data"]["parents"] != parents:
continue
# TODO we should tell the user that the found entity
# with the same name does not match the same ftrack id?
if "ftrackId" not in av_ent["data"]:
selected_av_entities.append(av_ent)
found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id
break
if not selected_av_entities:
return {
"success": False,
@ -155,7 +188,8 @@ class DeleteAssetSubset(BaseAction):
"created_at": datetime.now(),
"project_name": project_name,
"subset_ids_by_name": {},
"subset_ids_by_parent": {}
"subset_ids_by_parent": {},
"without_ftrack_id": found_without_ftrack_id
}
id_item = {
@ -413,14 +447,21 @@ class DeleteAssetSubset(BaseAction):
asset_ids_to_archive = []
ftrack_ids_to_delete = []
if len(assets_to_delete) > 0:
map_av_ftrack_id = spec_data["without_ftrack_id"]
# Prepare data when deleting whole avalon asset
avalon_assets = self.dbcon.find({"type": "asset"})
avalon_assets_by_parent = collections.defaultdict(list)
for asset in avalon_assets:
asset_id = asset["_id"]
parent_id = asset["data"]["visualParent"]
avalon_assets_by_parent[parent_id].append(asset)
if asset["_id"] in assets_to_delete:
ftrack_id = asset["data"]["ftrackId"]
if asset_id in assets_to_delete:
ftrack_id = map_av_ftrack_id.get(str(asset_id))
if not ftrack_id:
ftrack_id = asset["data"].get("ftrackId")
if not ftrack_id:
continue
ftrack_ids_to_delete.append(ftrack_id)
children_queue = Queue()
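A hedged sketch of the name-plus-parents fallback used above, written as a standalone helper against the same document fields; the helper itself is an assumption, not part of the diff:
def find_asset_without_ftrack_id(dbcon, name, parents):
    """Return the avalon asset named `name` whose data.parents match `parents`
    and which has no ftrackId stored yet (illustrative helper)."""
    for asset in dbcon.find({"type": "asset", "name": name}):
        data = asset.get("data", {})
        if "ftrackId" in data:
            continue
        if data.get("parents") == parents:
            return asset
    return None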


@ -0,0 +1,528 @@
import os
import copy
import shutil
import collections
import string
import clique
from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pypeapp import Anatomy
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import CustAttrIdKey
class Delivery(BaseAction):
'''Deliver data to client action.'''
#: Action identifier.
identifier = "delivery.action"
#: Action label.
label = "Delivery"
#: Action description.
description = "Deliver data to client"
#: roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project manager"]
icon = '{}/ftrack/action_icons/Delivery.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
db_con = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def interface(self, session, entities, event):
if event["data"].get("values", {}):
return
title = "Delivery data to Client"
items = []
item_splitter = {"type": "label", "value": "---"}
# Prepare component names for processing
components = None
project = None
for entity in entities:
if project is None:
project_id = None
for ent_info in entity["link"]:
if ent_info["type"].lower() == "project":
project_id = ent_info["id"]
break
if project_id is None:
project = entity["asset"]["parent"]["project"]
else:
project = session.query((
"select id, full_name from Project where id is \"{}\""
).format(project_id)).one()
_components = set(
[component["name"] for component in entity["components"]]
)
if components is None:
components = _components
continue
components = components.intersection(_components)
if not components:
break
project_name = project["full_name"]
items.append({
"type": "hidden",
"name": "__project_name__",
"value": project_name
})
# Prepare anatomy data
anatomy = Anatomy(project_name)
new_anatomies = []
first = None
for key in (anatomy.templates.get("delivery") or {}):
new_anatomies.append({
"label": key,
"value": key
})
if first is None:
first = key
skipped = False
# Add error message if there are no common components or delivery templates
if not components or not new_anatomies:
skipped = True
items.append({
"type": "label",
"value": "<h1>Something went wrong:</h1>"
})
items.append({
"type": "hidden",
"name": "__skipped__",
"value": skipped
})
if not components:
if len(entities) == 1:
items.append({
"type": "label",
"value": (
"- Selected entity doesn't have components to deliver."
)
})
else:
items.append({
"type": "label",
"value": (
"- Selected entities don't have common components."
)
})
# Add message if delivery anatomies are not set
if not new_anatomies:
items.append({
"type": "label",
"value": (
"- `\"delivery\"` anatomy key is not set in config."
)
})
# Skip if there are any data shortcomings
if skipped:
return {
"items": items,
"title": title
}
items.append({
"value": "<h1>Choose Components to deliver</h1>",
"type": "label"
})
for component in components:
items.append({
"type": "boolean",
"value": False,
"label": component,
"name": component
})
items.append(item_splitter)
items.append({
"value": "<h2>Location for delivery</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<i>NOTE: It is possible to replace `root` key in anatomy.</i>"
)
})
items.append({
"type": "text",
"name": "__location_path__",
"empty_text": "Type location path here...(Optional)"
})
items.append(item_splitter)
items.append({
"value": "<h2>Anatomy of delivery files</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<p><i>NOTE: These can be set in Anatomy.yaml"
" within `delivery` key.</i></p>"
)
})
items.append({
"type": "enumerator",
"name": "__new_anatomies__",
"data": new_anatomies,
"value": first
})
return {
"items": items,
"title": title
}
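For orientation, a hedged example of what anatomy.templates.get("delivery") might return; the key names and template strings below are assumptions, the real values come from the project's Anatomy.yaml:
# Illustrative only.
delivery_templates = {
    "clientSequence": "{root}/{project[name]}/io/out/{asset}_{subset}_v{version:0>3}.{frame}.{ext}",
    "clientSingle": "{root}/{project[name]}/io/out/{asset}_{subset}_v{version:0>3}.{ext}",
}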
def launch(self, session, entities, event):
if "values" not in event["data"]:
return
self.report_items = collections.defaultdict(list)
values = event["data"]["values"]
skipped = values.pop("__skipped__")
if skipped:
return None
component_names = []
location_path = values.pop("__location_path__")
anatomy_name = values.pop("__new_anatomies__")
project_name = values.pop("__project_name__")
for key, value in values.items():
if value is True:
component_names.append(key)
if not component_names:
return {
"success": True,
"message": "Not selected components to deliver."
}
location_path = location_path.strip()
if location_path:
location_path = os.path.normpath(location_path)
if not os.path.exists(location_path):
return {
"success": False,
"message": (
"Entered location path does not exists. \"{}\""
).format(location_path)
}
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
repres_to_deliver = []
for entity in entities:
asset = entity["asset"]
subset_name = asset["name"]
version = entity["version"]
parent = asset["parent"]
parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if parent_mongo_id:
parent_mongo_id = ObjectId(parent_mongo_id)
else:
asset_ent = self.db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
if not asset_ent:
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
msg = "Not synchronized entities to avalon"
self.report_items[msg].append(ent_path)
self.log.warning("{} <{}>".format(msg, ent_path))
continue
parent_mongo_id = asset_ent["_id"]
subset_ent = self.db_con.find_one({
"type": "subset",
"parent": parent_mongo_id,
"name": subset_name
})
version_ent = self.db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
repre_ents = self.db_con.find({
"type": "representation",
"parent": version_ent["_id"]
})
repres_by_name = {}
for repre in repre_ents:
repre_name = repre["name"]
repres_by_name[repre_name] = repre
for component in entity["components"]:
comp_name = component["name"]
if comp_name not in component_names:
continue
repre = repres_by_name.get(comp_name)
repres_to_deliver.append(repre)
if not location_path:
location_path = os.environ.get("AVALON_PROJECTS") or ""
print(location_path)
anatomy = Anatomy(project_name)
for repre in repres_to_deliver:
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data["root"] = location_path
anatomy_filled = anatomy.format_all(anatomy_data)
test_path = anatomy_filled["delivery"][anatomy_name]
if not test_path.solved:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(anatomy_name)
if test_path.missing_keys:
keys = ", ".join(test_path.missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(str(repre["_id"]), keys)
if test_path.invalid_types:
items = []
for key, value in test_path.invalid_types.items():
items.append("\"{}\" {}".format(key, str(value)))
keys = ", ".join(items)
sub_msg = (
"Representation: {}<br>"
"- Invalid value DataType: \"{}\"<br>"
).format(str(repre["_id"]), keys)
self.report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(result)
)
)
continue
# Get source repre path
frame = repre['context'].get('frame')
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_represenation(repre)
# TODO add backup solution where root of path from component
# is replaced with AVALON_PROJECTS root
if not frame:
self.process_single_file(
repre_path, anatomy, anatomy_name, anatomy_data
)
else:
self.process_sequence(
repre_path, anatomy, anatomy_name, anatomy_data
)
self.db_con.uninstall()
return self.report()
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
dir_path, file_name = os.path.split(str(repre_path))
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
msg = "Source file was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
# skip if the collection doesn't have the same basename
if not col.head.startswith(file_name_items[0]):
continue
src_collection = col
break
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
print(delivery_path)
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
self.copy_file(src, dst)
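A small hedged sketch of the clique pattern used here: assemble a source sequence and re-address every frame under a new head and tail (file names below are illustrative):
import clique

files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]  # assumed listing
collections, remainder = clique.assemble(files)
src = collections[0]  # the assembled frame sequence

dst = clique.Collection(head="/delivery/render_v001.", tail=".exr",
                        padding=src.padding)
for index in src.indexes:
    dst_name = "{}{}{}".format(dst.head, dst.format("{padding}") % index, dst.tail)
    print(dst_name)  # /delivery/render_v001.1001.exr, ...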
def path_from_represenation(self, representation):
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = os.environ.get("AVALON_PROJECTS") or ""
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(self, src_path, dst_path):
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
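The hardlink-then-copy fallback above can be exercised on its own; a minimal standard-library sketch (paths are placeholders):
import os
import shutil


def link_or_copy(src_path, dst_path):
    """Try a hardlink first (cheap on the same volume), fall back to a full copy."""
    if os.path.exists(dst_path):
        return
    try:
        os.link(src_path, dst_path)
    except OSError:
        shutil.copyfile(src_path, dst_path)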
def report(self):
items = []
title = "Delivery report"
for msg, _items in self.report_items.items():
if not _items:
continue
if items:
items.append({"type": "label", "value": "---"})
items.append({
"type": "label",
"value": "# {}".format(msg)
})
if not isinstance(_items, (list, tuple)):
_items = [_items]
__items = []
for item in _items:
__items.append(str(item))
items.append({
"type": "label",
"value": '<p>{}</p>'.format("<br>".join(__items))
})
if not items:
return {
"success": True,
"message": "Delivery Finished"
}
return {
"items": items,
"title": title,
"success": False,
"message": "Delivery Finished"
}
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
Delivery(session, plugins_presets).register()


@ -70,7 +70,10 @@ class SyncToAvalonLocal(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()


@ -105,7 +105,10 @@ class SyncToAvalonServer(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()


@ -31,7 +31,7 @@ class SyncToAvalonEvent(BaseEvent):
"timelog", "auth_userrole", "appointment"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid"]
ignore_keys = ["statusid", "thumbid"]
project_query = (
"select full_name, name, custom_attributes"
@ -131,7 +131,9 @@ class SyncToAvalonEvent(BaseEvent):
ftrack_id = proj["data"]["ftrackId"]
self._avalon_ents_by_ftrack_id[ftrack_id] = proj
for ent in ents:
ftrack_id = ent["data"]["ftrackId"]
ftrack_id = ent["data"].get("ftrackId")
if ftrack_id is None:
continue
self._avalon_ents_by_ftrack_id[ftrack_id] = ent
return self._avalon_ents_by_ftrack_id
@ -484,6 +486,14 @@ class SyncToAvalonEvent(BaseEvent):
action = ent_info["action"]
ftrack_id = ent_info["entityId"]
if isinstance(ftrack_id, list):
self.log.warning((
"BUG REPORT: Entity info has `entityId` as `list` \"{}\""
).format(ent_info))
if len(ftrack_id) == 0:
continue
ftrack_id = ftrack_id[0]
if action == "move":
ent_keys = ent_info["keys"]
# Separate update info from move action
@ -1427,6 +1437,93 @@ class SyncToAvalonEvent(BaseEvent):
parent_id = ent_info["parentId"]
new_tasks_by_parent[parent_id].append(ent_info)
pop_out_ents.append(ftrack_id)
continue
name = (
ent_info
.get("changes", {})
.get("name", {})
.get("new")
)
avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {}
avalon_ent_by_name_ftrack_id = (
avalon_ent_by_name
.get("data", {})
.get("ftrackId")
)
if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None:
ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
if not ftrack_ent:
ftrack_ent = self.process_session.query(
self.entities_query_by_id.format(
self.cur_project["id"], ftrack_id
)
).one()
self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
avalon_ent_parents = (
avalon_ent_by_name.get("data", {}).get("parents")
)
if parents == avalon_ent_parents:
self.dbcon.update_one({
"_id": avalon_ent_by_name["_id"]
}, {
"$set": {
"data.ftrackId": ftrack_id,
"data.entityType": entity_type
}
})
avalon_ent_by_name["data"]["ftrackId"] = ftrack_id
avalon_ent_by_name["data"]["entityType"] = entity_type
self._avalon_ents_by_ftrack_id[ftrack_id] = (
avalon_ent_by_name
)
if self._avalon_ents_by_parent_id:
found = None
for _parent_id_, _entities_ in (
self._avalon_ents_by_parent_id.items()
):
for _idx_, entity in enumerate(_entities_):
if entity["_id"] == avalon_ent_by_name["_id"]:
found = (_parent_id_, _idx_)
break
if found:
break
if found:
_parent_id_, _idx_ = found
self._avalon_ents_by_parent_id[_parent_id_][
_idx_] = avalon_ent_by_name
if self._avalon_ents_by_id:
self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = (
avalon_ent_by_name
)
if self._avalon_ents_by_name:
self._avalon_ents_by_name[name] = avalon_ent_by_name
if self._avalon_ents:
found = None
project, entities = self._avalon_ents
for _idx_, _ent_ in enumerate(entities):
if _ent_["_id"] != avalon_ent_by_name["_id"]:
continue
found = _idx_
break
if found is not None:
entities[found] = avalon_ent_by_name
self._avalon_ents = project, entities
pop_out_ents.append(ftrack_id)
continue
configuration_id = entity_type_conf_ids.get(entity_type)
if not configuration_id:
@ -1731,6 +1828,13 @@ class SyncToAvalonEvent(BaseEvent):
obj_type_id = ent_info["objectTypeId"]
ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id)
if ent_cust_attrs is None:
self.log.warning((
"BUG REPORT: Entity has ent type without"
" custom attributes <{}> \"{}\""
).format(entType, ent_info))
continue
for key, values in ent_info["changes"].items():
if key in hier_attrs_keys:
self.hier_cust_attrs_changes[key].append(ftrack_id)


@ -207,7 +207,9 @@ class UserAssigmentEvent(BaseEvent):
# formatting work dir is easiest part as we can use whole path
work_dir = anatomy.format(data)['avalon']['work']
# we also need publish but not whole
publish = anatomy.format_all(data)['partial']['avalon']['publish']
filled_all = anatomy.format_all(data)
publish = filled_all['avalon']['publish']
# now find path to {asset}
m = re.search("(^.+?{})".format(data['asset']),
publish)


@ -265,6 +265,37 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(

View file

@ -1,4 +1,5 @@
import os
import sys
import time
import socket
import threading
@ -26,6 +27,8 @@ class SocketThread(threading.Thread):
self.mongo_error = False
self._temp_data = {}
def stop(self):
self._is_running = False
@ -50,8 +53,7 @@ class SocketThread(threading.Thread):
)
self.subproc = subprocess.Popen(
["python", self.filepath, "-port", str(self.port)],
stdout=subprocess.PIPE
[sys.executable, self.filepath, "-port", str(self.port)]
)
# Listen for incoming connections
@ -81,8 +83,9 @@ class SocketThread(threading.Thread):
try:
if not self._is_running:
break
data = None
try:
data = connection.recv(16)
data = self.get_data_from_con(connection)
time_con = time.time()
except socket.timeout:
@ -99,10 +102,7 @@ class SocketThread(threading.Thread):
self._is_running = False
break
if data:
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
self._handle_data(connection, data)
except Exception as exc:
self.log.error(
@ -115,9 +115,15 @@ class SocketThread(threading.Thread):
if self.subproc.poll() is None:
self.subproc.terminate()
lines = self.subproc.stdout.readlines()
if lines:
print("*** Socked Thread stdout ***")
for line in lines:
os.write(1, line)
self.finished = True
def get_data_from_con(self, connection):
return connection.recv(16)
def _handle_data(self, connection, data):
if not data:
return
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
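A hedged sketch of how the new get_data_from_con hook can be overridden for larger payloads; the chunked-read subclass and the import path are assumptions, not part of this diff:
from pype.ftrack.ftrack_server.socket_thread import SocketThread  # assumed module path


class LongMessageSocketThread(SocketThread):
    """Illustrative subclass reading until the peer stops sending."""

    def get_data_from_con(self, connection):
        chunks = []
        while True:
            chunk = connection.recv(4096)
            chunks.append(chunk)
            if len(chunk) < 4096:
                break
        return b"".join(chunks)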


@ -0,0 +1,56 @@
import sys
import signal
import socket
import traceback
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pypeapp import Logger
log = Logger().get_logger("FtrackUserServer")
def main(args):
port = int(args[-1])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug(
"User Ftrack Server connected to {} port {}".format(*server_address)
)
sock.connect(server_address)
sock.sendall(b"CreatedUser")
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
)
server = FtrackServer("action")
log.debug("Launched User Ftrack Server")
server.run_server(session=session)
except Exception:
traceback.print_exception(*sys.exc_info())
finally:
log.debug("Closing socket")
sock.close()
return 1
if __name__ == "__main__":
# Register interrupt signal
def signal_handler(sig, frame):
log.info(
"Process was forced to stop. Process ended."
)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
sys.exit(main(sys.argv))


@ -1722,7 +1722,11 @@ class SyncEntitiesFactory:
self.avalon_project_id = new_id
self._avalon_ents_by_id[str(new_id)] = project_item
if self._avalon_ents_by_ftrack_id is None:
self._avalon_ents_by_ftrack_id = {}
self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id)
if self._avalon_ents_by_name is None:
self._avalon_ents_by_name = {}
self._avalon_ents_by_name[project_item["name"]] = str(new_id)
self.create_list.append(project_item)


@ -1,26 +1,27 @@
import os
import json
import threading
import time
from Qt import QtCore, QtGui, QtWidgets
import datetime
import threading
from Qt import QtCore, QtWidgets
import ftrack_api
from pypeapp import style
from pype.ftrack import FtrackServer, check_ftrack_url, credentials
from ..ftrack_server.lib import check_ftrack_url
from ..ftrack_server import socket_thread
from ..lib import credentials
from . import login_dialog
from pype import api as pype
from pypeapp import Logger
log = pype.Logger().get_logger("FtrackModule", "ftrack")
log = Logger().get_logger("FtrackModule", "ftrack")
class FtrackModule:
def __init__(self, main_parent=None, parent=None):
self.parent = parent
self.widget_login = login_dialog.Login_Dialog_ui(self)
self.action_server = FtrackServer('action')
self.thread_action_server = None
self.thread_socket_server = None
self.thread_timer = None
self.bool_logged = False
@ -75,14 +76,6 @@ class FtrackModule:
# Actions part
def start_action_server(self):
self.bool_action_thread_running = True
self.set_menu_visibility()
if (
self.thread_action_server is not None and
self.bool_action_thread_running is False
):
self.stop_action_server()
if self.thread_action_server is None:
self.thread_action_server = threading.Thread(
target=self.set_action_server
@ -90,35 +83,114 @@ class FtrackModule:
self.thread_action_server.start()
def set_action_server(self):
first_check = True
while self.bool_action_thread_running is True:
if not check_ftrack_url(os.environ['FTRACK_SERVER']):
if first_check:
log.warning(
"Could not connect to Ftrack server"
)
first_check = False
if self.bool_action_server_running:
return
self.bool_action_server_running = True
self.bool_action_thread_running = False
ftrack_url = os.environ['FTRACK_SERVER']
parent_file_path = os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
min_fail_seconds = 5
max_fail_count = 3
wait_time_after_max_fail = 10
# Threads data
thread_name = "ActionServerThread"
thread_port = 10021
subprocess_path = (
"{}/ftrack_server/sub_user_server.py".format(parent_file_path)
)
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
last_failed = datetime.datetime.now()
failed_count = 0
ftrack_accessible = False
printed_ftrack_error = False
# Main loop
while True:
if not self.bool_action_server_running:
log.debug("Action server was pushed to stop.")
break
# Check if the Ftrack and Mongo urls are accessible
if not ftrack_accessible:
ftrack_accessible = check_ftrack_url(ftrack_url)
# Run threads only if Ftrack is accessible
if not ftrack_accessible:
if not printed_ftrack_error:
log.warning("Can't access Ftrack {}".format(ftrack_url))
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
self.bool_action_thread_running = False
self.set_menu_visibility()
printed_ftrack_error = True
time.sleep(1)
continue
log.info(
"Connected to Ftrack server. Running actions session"
)
try:
self.bool_action_server_running = True
printed_ftrack_error = False
# Run backup thread which does not require mongo to work
if self.thread_socket_server is None:
if failed_count < max_fail_count:
self.thread_socket_server = socket_thread.SocketThread(
thread_name, thread_port, subprocess_path
)
self.thread_socket_server.start()
self.bool_action_thread_running = True
self.set_menu_visibility()
elif failed_count == max_fail_count:
log.warning((
"Action server failed {} times."
" I'll try to run again {}s later"
).format(
str(max_fail_count), str(wait_time_after_max_fail))
)
failed_count += 1
elif ((
datetime.datetime.now() - last_failed
).seconds > wait_time_after_max_fail):
failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not self.thread_socket_server.isAlive():
self.thread_socket_server.join()
self.thread_socket_server = None
ftrack_accessible = False
self.bool_action_thread_running = False
self.set_menu_visibility()
self.action_server.run_server()
if self.bool_action_thread_running:
log.debug("Ftrack action server has stopped")
except Exception:
log.warning(
"Ftrack Action server crashed. Trying to connect again",
exc_info=True
)
self.bool_action_server_running = False
self.set_menu_visibility()
first_check = True
_last_failed = datetime.datetime.now()
delta_time = (_last_failed - last_failed).seconds
if delta_time < min_fail_seconds:
failed_count += 1
else:
failed_count = 0
last_failed = _last_failed
time.sleep(1)
self.bool_action_thread_running = False
self.bool_action_server_running = False
self.set_menu_visibility()
def reset_action_server(self):
self.stop_action_server()
@ -126,16 +198,18 @@ class FtrackModule:
def stop_action_server(self):
try:
self.bool_action_thread_running = False
self.action_server.stop_session()
self.bool_action_server_running = False
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
if self.thread_action_server is not None:
self.thread_action_server.join()
self.thread_action_server = None
log.info("Ftrack action server was forced to stop")
self.bool_action_server_running = False
self.set_menu_visibility()
except Exception:
log.warning(
"Error has happened during Killing action server",
@ -201,9 +275,9 @@ class FtrackModule:
self.stop_timer_thread()
return
self.aRunActionS.setVisible(not self.bool_action_thread_running)
self.aRunActionS.setVisible(not self.bool_action_server_running)
self.aResetActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_server_running)
if self.bool_timer_event is False:
self.start_timer_thread()


@ -196,9 +196,13 @@ def any_outdated():
if representation in checked:
continue
representation_doc = io.find_one({"_id": io.ObjectId(representation),
"type": "representation"},
projection={"parent": True})
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
@ -308,27 +312,38 @@ def switch_item(container,
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
asset = io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
subset = io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[('name', -1)]
)
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
representation = io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
@ -366,7 +381,10 @@ def get_asset(asset_name=None):
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
asset_document = io.find_one({
"name": asset_name,
"type": "asset"
})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
@ -538,8 +556,7 @@ def get_subsets(asset_name,
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
asset_io = io.find_one({"type": "asset", "name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
@ -563,14 +580,20 @@ def get_subsets(asset_name,
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version_sel = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
version_sel = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": int(version)
})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}


@ -162,6 +162,7 @@ def on_open(_):
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
lib.validate_fps()
lib.fix_incompatible_containers()
if any_outdated():
log.warning("Scene has outdated content.")


@ -2318,6 +2318,25 @@ def get_attr_in_layer(attr, layer):
return cmds.getAttr(attr)
def fix_incompatible_containers():
"""Return whether the current scene has any outdated content"""
host = avalon.api.registered_host()
for container in host.ls():
loader = container['loader']
print(container['loader'])
if loader in ["MayaAsciiLoader",
"AbcLoader",
"ModelLoader",
"CameraLoader",
"RigLoader",
"FBXLoader"]:
cmds.setAttr(container["objectName"] + ".loader",
"ReferenceLoader", type="string")
def _null(*args):
pass


@ -15,12 +15,13 @@ log = logging.getLogger(__name__)
def _get_menu():
"""Return the menu instance if it currently exists in Maya"""
app = QtWidgets.QApplication.instance()
widgets = dict((w.objectName(), w) for w in app.allWidgets())
widgets = dict((
w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
menu = widgets.get(self._menu)
return menu
def deferred():
log.info("Attempting to install scripts menu..")


@ -33,41 +33,6 @@ if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
class NukeHandler(logging.Handler):
'''
Nuke Handler - emits logs into nuke's script editor.
warning will emit nuke.warning()
critical and fatal would popup msg dialog to alert of the error.
'''
def __init__(self):
logging.Handler.__init__(self)
self.set_name("Pype_Nuke_Handler")
def emit(self, record):
# Formatted message:
msg = self.format(record)
if record.levelname.lower() in [
# "warning",
"critical",
"fatal",
"error"
]:
msg = self.format(record)
nuke.message(msg)
'''Adding Nuke Logging Handler'''
log.info([handler.get_name() for handler in logging.root.handlers[:]])
nuke_handler = NukeHandler()
if nuke_handler.get_name() \
not in [handler.get_name()
for handler in logging.root.handlers[:]]:
logging.getLogger().addHandler(nuke_handler)
logging.getLogger().setLevel(logging.INFO)
log.info([handler.get_name() for handler in logging.root.handlers[:]])
def reload_config():
"""Attempt to reload pipeline at run-time.
@ -113,7 +78,7 @@ def install():
family_states = [
"write",
"review",
"nukenodes"
"nukenodes"
"gizmo"
]


@ -21,7 +21,6 @@ from .presets import (
from .presets import (
get_anatomy
)
# TODO: remove get_anatomy and import directly Anatomy() here
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
@ -50,8 +49,6 @@ def checkInventoryVersions():
and check if the node is having actual version. If not then it will color
it to red.
"""
# TODO: make it for all nodes not just Read (Loader
# get all Loader nodes by avalon attribute metadata
for each in nuke.allNodes():
if each.Class() == 'Read':
@ -93,7 +90,6 @@ def checkInventoryVersions():
def writes_version_sync():
''' Callback synchronizing version of publishable write nodes
'''
# TODO: make it work with new write node group
try:
rootVersion = pype.get_version_from_path(nuke.root().name())
padding = len(rootVersion)
@ -130,7 +126,8 @@ def writes_version_sync():
os.makedirs(os.path.dirname(node_new_file), 0o766)
except Exception as e:
log.warning(
"Write node: `{}` has no version in path: {}".format(each.name(), e))
"Write node: `{}` has no version in path: {}".format(
each.name(), e))
def version_up_script():
@ -183,9 +180,12 @@ def format_anatomy(data):
try:
padding = int(anatomy.templates['render']['padding'])
except KeyError as e:
log.error("`padding` key is not in `render` "
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`".format(e))
msg = ("`padding` key is not in `render` "
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`").format(e)
log.error(msg)
nuke.message(msg)
version = data.get("version", None)
if not version:
@ -265,7 +265,9 @@ def create_write_node(name, data, input=None, prenodes=None):
anatomy_filled = format_anatomy(data)
except Exception as e:
log.error("problem with resolving anatomy tepmlate: {}".format(e))
msg = "problem with resolving anatomy tepmlate: {}".format(e)
log.error(msg)
nuke.message(msg)
# build file path to workfiles
fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/")
@ -372,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None):
now_node.setInput(0, prev_node)
# imprinting group node
GN = avalon.nuke.imprint(GN, data["avalon"])
avalon.nuke.imprint(GN, data["avalon"])
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -543,8 +545,11 @@ class WorkfileSettings(object):
viewer_dict (dict): adjustments from presets
'''
assert isinstance(viewer_dict, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
if not isinstance(viewer_dict, dict):
msg = "set_viewers_colorspace(): argument should be dictionary"
log.error(msg)
nuke.message(msg)
return
filter_knobs = [
"viewerProcess",
@ -592,8 +597,10 @@ class WorkfileSettings(object):
root_dict (dict): adjustmensts from presets
'''
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
if not isinstance(root_dict, dict):
msg = "set_root_colorspace(): argument should be dictionary"
log.error(msg)
nuke.message(msg)
log.debug(">> root_dict: {}".format(root_dict))
@ -638,12 +645,105 @@ class WorkfileSettings(object):
write_dict (dict): nuke write node as dictionary
'''
# TODO: complete this function so any write node in
# scene will have fixed colorspace following presets for the project
assert isinstance(write_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
if not isinstance(write_dict, dict):
msg = "set_root_colorspace(): argument should be dictionary"
log.error(msg)
return
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
from avalon.nuke import get_avalon_knob_data
for node in nuke.allNodes():
if node.Class() in ["Viewer", "Dot"]:
continue
# get data from avalon knob
avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])
if not avalon_knob_data:
continue
if avalon_knob_data["id"] != "pyblish.avalon.instance":
continue
# establish families
families = [avalon_knob_data["family"]]
if avalon_knob_data.get("families"):
families.append(avalon_knob_data.get("families"))
# except disabled nodes but exclude backdrops in test
for fmly, knob in write_dict.items():
write = None
if (fmly in families):
# Add all nodes in group instances.
if node.Class() == "Group":
node.begin()
for x in nuke.allNodes():
if x.Class() == "Write":
write = x
node.end()
elif node.Class() == "Write":
write = node
else:
log.warning("Wrong write node Class")
write["colorspace"].setValue(str(knob["colorspace"]))
log.info(
"Setting `{0}` to `{1}`".format(
write.name(),
knob["colorspace"]))
def set_reads_colorspace(self, reads):
""" Setting colorspace to Read nodes
Looping through all Read nodes and trying to set their colorspace based on regex rules in presets
"""
changes = dict()
for n in nuke.allNodes():
file = nuke.filename(n)
if not n.Class() == "Read":
continue
# load nuke presets for Read's colorspace
read_clrs_presets = get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace preset for Read nodes is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
log.debug(preset_clrsp)
if preset_clrsp is not None:
current = n["colorspace"].value()
future = str(preset_clrsp)
if current != future:
changes.update({
n.name(): {
"from": current,
"to": future
}
})
log.debug(changes)
if changes:
msg = "Read nodes are not set to correct colospace:\n\n"
for nname, knobs in changes.items():
msg += str(" - node: '{0}' is now '{1}' "
"but should be '{2}'\n").format(
nname, knobs["from"], knobs["to"]
)
msg += "\nWould you like to change it?"
if nuke.ask(msg):
for nname, knobs in changes.items():
n = nuke.toNode(nname)
n["colorspace"].setValue(knobs["to"])
log.info(
"Setting `{0}` to `{1}`".format(
nname,
knobs["to"]))
def set_colorspace(self):
''' Setting colorspace following presets
@ -653,25 +753,33 @@ class WorkfileSettings(object):
try:
self.set_root_colorspace(nuke_colorspace["root"])
except AttributeError:
log.error(
"set_colorspace(): missing `root` settings in template")
msg = "set_colorspace(): missing `root` settings in template"
try:
self.set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
msg = "set_colorspace(): missing `viewer` settings in template"
nuke.message(msg)
log.error(msg)
try:
self.set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
msg = "set_colorspace(): missing `write` settings in template"
nuke.message(msg)
log.error(msg)
reads = nuke_colorspace.get("read")
if reads:
self.set_reads_colorspace(reads)
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
msg = "Nuke is not in templates! Contact your supervisor!"
nuke.message(msg)
log.error(msg)
def reset_frame_range_handles(self):
"""Set frame range to current asset"""
@ -758,13 +866,13 @@ class WorkfileSettings(object):
}
if any(x for x in data.values() if x is None):
log.error(
"Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`".format(**data)
)
msg = ("Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`").format(**data)
log.error(msg)
nuke.message(msg)
bbox = self._asset_entity.get('data', {}).get('crop')
@ -781,10 +889,10 @@ class WorkfileSettings(object):
)
except Exception as e:
bbox = None
log.error(
"{}: {} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default".format(__name__, e)
)
msg = ("{}:{} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default").format(__name__, e)
log.error(msg)
nuke.message(msg)
existing_format = None
for format in nuke.formats():
@ -1000,7 +1108,8 @@ class BuildWorkfile(WorkfileSettings):
def process(self,
regex_filter=None,
version=None,
representations=["exr", "dpx", "lutJson", "mov", "preview"]):
representations=["exr", "dpx", "lutJson", "mov",
"preview", "png"]):
"""
A short description.
@ -1041,9 +1150,10 @@ class BuildWorkfile(WorkfileSettings):
wn["render"].setValue(True)
vn.setInput(0, wn)
bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
color='0xcc1102ff', layer=-1,
nodes=[wn])
# adding backdrop under write
self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
color='0xcc1102ff', layer=-1,
nodes=[wn])
# move position
self.position_up(4)
@ -1057,10 +1167,12 @@ class BuildWorkfile(WorkfileSettings):
version=version,
representations=representations)
log.info("__ subsets: `{}`".format(subsets))
for name, subset in subsets.items():
log.debug("___________________")
log.debug(name)
log.debug(subset["version"])
nodes_backdrop = list()
for name, subset in subsets.items():
if "lut" in name:
continue
@ -1090,9 +1202,10 @@ class BuildWorkfile(WorkfileSettings):
# move position
self.position_right()
bdn = self.create_backdrop(label="Loaded Reads",
color='0x2d7702ff', layer=-1,
nodes=nodes_backdrop)
# adding backdrop under all read nodes
self.create_backdrop(label="Loaded Reads",
color='0x2d7702ff', layer=-1,
nodes=nodes_backdrop)
def read_loader(self, representation):
"""
@ -1235,6 +1348,8 @@ class ExporterReview:
# get first and last frame
self.first_frame = min(self.collection.indexes)
self.last_frame = max(self.collection.indexes)
if "slate" in self.instance.data["families"]:
self.first_frame += 1
else:
self.fname = os.path.basename(self.path_in)
self.fhead = os.path.splitext(self.fname)[0] + "."
@ -1254,7 +1369,7 @@ class ExporterReview:
'ext': self.ext,
'files': self.file,
"stagingDir": self.staging_dir,
"anatomy_template": "publish",
"anatomy_template": "render",
"tags": [self.name.replace("_", "-")] + add_tags
}
@ -1460,14 +1575,13 @@ class ExporterReviewMov(ExporterReview):
self.log.info("Rendered...")
def save_file(self):
import shutil
with anlib.maintained_selection():
self.log.info("Saving nodes as file... ")
# select temp nodes
anlib.select_nodes(self._temp_nodes)
# create nk path
path = os.path.splitext(self.path)[0] + ".nk"
# save file to the path
nuke.nodeCopy(path)
shutil.copyfile(self.instance.context.data["currentFile"], path)
self.log.info("Nodes exported...")
return path
@ -1508,19 +1622,21 @@ class ExporterReviewMov(ExporterReview):
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
self.instance.data["baked_colorspace_movie"] = self.path
write_node["file"].setValue(self.path)
write_node["file_type"].setValue(self.ext)
write_node["meta_codec"].setValue("ap4h")
write_node["mov64_codec"].setValue("ap4h")
write_node["mov64_write_timecode"].setValue(1)
write_node["raw"].setValue(1)
# connect
write_node.setInput(0, self.previous_node)
self._temp_nodes.append(write_node)
self.log.debug("Write... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# ---------- render or save to nk
if farm:
nuke.scriptSave()
path_nk = self.save_file()
self.data.update({
"bakeScriptPath": path_nk,
@ -1537,9 +1653,9 @@ class ExporterReviewMov(ExporterReview):
self.log.debug("Representation... `{}`".format(self.data))
#---------- Clean up
# ---------- Clean up
self.clean_nodes()
nuke.scriptSave()
return self.data
@ -1578,3 +1694,70 @@ def get_dependent_nodes(nodes):
})
return connections_in, connections_out
def find_free_space_to_paste_nodes(
nodes,
group=nuke.root(),
direction="right",
offset=300):
"""
For getting coordinates in DAG (node graph) for placing new nodes
Arguments:
nodes (list): list of nuke.Node objects
group (nuke.Node) [optional]: object in which context it is
direction (str) [optional]: where we want it to be placed
[left, right, top, bottom]
offset (int) [optional]: what offset it is from rest of nodes
Returns:
xpos (int): x coordinate in DAG
ypos (int): y coordinate in DAG
"""
if len(nodes) == 0:
return 0, 0
group_xpos = list()
group_ypos = list()
# get local coordinates of all nodes
nodes_xpos = [n.xpos() for n in nodes] + \
[n.xpos() + n.screenWidth() for n in nodes]
nodes_ypos = [n.ypos() for n in nodes] + \
[n.ypos() + n.screenHeight() for n in nodes]
# get complete screen size of all nodes to be placed in
nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
nodes_screen_heigth = max(nodes_ypos) - min(nodes_ypos)
# get screen size (r,l,t,b) of all nodes in `group`
with group:
group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
[n.xpos() + n.screenWidth() for n in nuke.allNodes()
if n not in nodes]
group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
[n.ypos() + n.screenHeight() for n in nuke.allNodes()
if n not in nodes]
# calc output left
if direction in "left":
xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output right
if direction in "right":
xpos = max(group_xpos) + abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output top
if direction in "top":
xpos = min(group_xpos)
ypos = min(group_ypos) - abs(nodes_screen_heigth) - abs(offset)
return xpos, ypos
# calc output bottom
if direction in "bottom":
xpos = min(group_xpos)
ypos = max(group_ypos) + abs(offset)
return xpos, ypos
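A hedged usage sketch, meant to be run from Nuke's script editor with the function above available in the session; the selection stands in for the nodes you are about to paste next to:
import nuke

selected = nuke.selectedNodes()
xpos, ypos = find_free_space_to_paste_nodes(selected, direction="right", offset=300)
print(xpos, ypos)  # free coordinates in the DAG where new nodes could be placed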


@ -1,6 +1,6 @@
from pype import api as pype
from pypeapp import Anatomy, config
import nuke
log = pype.Logger().get_logger(__name__, "nuke")
@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg):
families = kwarg.get("families", [])
preset = kwarg.get("preset", None) # omit < 2.0.0v
assert any([host, cls]), log.error(
assert any([host, cls]), nuke.message(
"`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
nuke_dataflow = get_dataflow_preset().get(str(host), None)
@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg):
families = kwarg.get("families", [])
preset = kwarg.get("preset", None) # omit < 2.0.0v
assert any([host, cls]), log.error(
"`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
if not any([host, cls]):
msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)
log.error(msg)
nuke.message(msg)
nuke_colorspace = get_colorspace_preset().get(str(host), None)
nuke_colorspace_node = nuke_colorspace.get(str(cls), None)


@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
def install(config):
def install():
"""
Installing Nukestudio integration for avalon


@ -0,0 +1,32 @@
"""Create a model asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
class CreateModel(Creator):
"""Polygonal static geometry"""
name = "modelMain"
label = "Model"
family = "model"
icon = "cube"
def process(self):
import pype.blender
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.model_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.objects.link(obj)
return collection


@ -0,0 +1,315 @@
"""Load a model asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import avalon.blender.pipeline
import bpy
import pype.blender
from avalon import api
logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
class BlendModelLoader(pype.blender.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
@staticmethod
def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
"""Find the collection(s) with name, loaded from libpath.
Note:
It is assumed that only 1 matching collection is found.
"""
for collection in bpy.data.collections:
if collection.name != name:
continue
if collection.library is None:
continue
if not collection.library.filepath:
continue
collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
if collection_lib_path == normalized_libpath:
return collection
return None
@staticmethod
def _collection_contains_object(
collection: bpy.types.Collection, object: bpy.types.Object
) -> bool:
"""Check if the collection contains the object."""
for obj in collection.objects:
if obj == object:
return True
return False
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.model_name(asset, subset)
container_name = pype.blender.plugin.model_name(
asset, subset, namespace
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
if not instance_empty.get("avalon"):
instance_empty["avalon"] = dict()
avalon_info = instance_empty["avalon"]
avalon_info.update({"container_name": container_name})
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
container = bpy.data.collections[lib_container]
container.name = container_name
instance_empty.instance_collection = container
container.make_local()
avalon.blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
nodes = list(container.objects)
nodes.append(container)
nodes.append(instance_empty)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.debug(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_libpath = (
self._get_library_from_container(collection).filepath
)
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
# Let Blender's garbage collection take care of removing the library
# itself after removing the objects.
objects_to_remove = set()
collection_objects = list()
collection_objects[:] = collection.objects
for obj in collection_objects:
# Unlink every object
collection.objects.unlink(obj)
remove_obj = True
for coll in [
coll for coll in bpy.data.collections
if coll != collection
]:
if (
coll.objects and
self._collection_contains_object(coll, obj)
):
remove_obj = False
if remove_obj:
objects_to_remove.add(obj)
for obj in objects_to_remove:
# Only delete objects that are not used elsewhere
bpy.data.objects.remove(obj)
instance_empties = [
obj for obj in collection.users_dupli_group
if obj.name in collection.name
]
if instance_empties:
instance_empty = instance_empties[0]
container_name = instance_empty["avalon"]["container_name"]
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [container_name]
new_collection = self._get_lib_collection(container_name, libpath)
if new_collection is None:
raise ValueError(
"A matching collection '{container_name}' "
"should have been found in: {libpath}"
)
for obj in new_collection.objects:
collection.objects.link(obj)
bpy.data.collections.remove(new_collection)
# Update the representation on the collection
avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
avalon_prop["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
instance_parents = list(collection.users_dupli_group)
instance_objects = list(collection.objects)
for obj in instance_objects + instance_parents:
bpy.data.objects.remove(obj)
bpy.data.collections.remove(collection)
return True
class CacheModelLoader(pype.blender.AssetLoader):
"""Load cache models.
Stores the imported asset in a collection named after the asset.
Note:
At least for now it only supports Alembic files.
"""
families = ["model"]
representations = ["abc"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
raise NotImplementedError("Loading of Alembic files is not yet implemented.")
# TODO (jasper): implement Alembic import.
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
# TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
lib_container = container_name = (
pype.blender.plugin.model_name(asset, subset, namespace)
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (data_from, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
collection = bpy.data.collections[lib_container]
collection.name = container_name
instance_empty.instance_collection = collection
nodes = list(collection.objects)
nodes.append(collection)
nodes.append(instance_empty)
self[:] = nodes
return nodes

View file

@ -0,0 +1,16 @@
import bpy
import pyblish.api
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ['blender']
def process(self, context):
"""Inject the current working file"""
current_file = bpy.data.filepath
context.data['currentFile'] = current_file

View file

@ -0,0 +1,53 @@
import typing
from typing import Generator
import bpy
import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
class CollectModel(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Model"
order = pyblish.api.CollectorOrder
@staticmethod
def get_model_collections() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if (avalon_prop.get('family') == 'model'
and not avalon_prop.get('representation')):
yield collection
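# Illustrative example (hypothetical values): a collection imprinted with
# {"family": "model", "asset": "chair", "subset": "modelMain",
# "task": "modeling"} is yielded for publishing, while a collection whose
# avalon data also carries a "representation" id (i.e. a loaded container)
# is skipped.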
def process(self, context):
"""Collect the models from the current Blender scene."""
collections = self.get_model_collections()
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
members = list(collection.objects)
members.append(collection)
instance[:] = members
self.log.debug(instance.data)

View file

@ -0,0 +1,47 @@
import os
import avalon.blender.workio
import pype.api
class ExtractModel(pype.api.Extractor):
"""Extract as model."""
label = "Model"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s", instance.name, representation)

View file

@ -0,0 +1,49 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh Has UV's"
actions = [pype.blender.action.SelectInvalidAction]
optional = True
@staticmethod
def has_uvs(obj: bpy.types.Object) -> bool:
"""Check if an object has uv's."""
if not obj.data.uv_layers:
return False
for uv_layer in obj.data.uv_layers:
for polygon in obj.data.polygons:
for loop_index in polygon.loop_indices:
if not uv_layer.data[loop_index].uv:
return False
return True
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")

View file

@ -0,0 +1,35 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
label = "Mesh No Negative Scale"
actions = [pype.blender.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
if any(v < 0 for v in obj.scale):
invalid.append(obj)
return invalid
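# Illustrative example (hypothetical values): an object with
# obj.scale == (1.0, -1.0, 1.0) is reported as invalid, while
# (0.5, 0.5, 0.5) passes the check.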
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Meshes found in instance with negative scale: {invalid}"
)

View file

@ -7,8 +7,9 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin):
"""Create comments in Ftrack."""
order = pyblish.api.IntegratorOrder
label = "Integrate Comments to Ftrack."
label = "Integrate Comments to Ftrack"
families = ["shot"]
enabled = False
def process(self, instance):
session = instance.context.data["ftrackSession"]

View file

@ -23,25 +23,43 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
# Collect session
session = ftrack_api.Session()
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
context.data["ftrackSession"] = session
# Collect task
project = os.environ.get('AVALON_PROJECT', '')
asset = os.environ.get('AVALON_ASSET', '')
task = os.environ.get('AVALON_TASK', None)
self.log.debug(task)
project_name = os.environ.get('AVALON_PROJECT', '')
asset_name = os.environ.get('AVALON_ASSET', '')
task_name = os.environ.get('AVALON_TASK', None)
# Find project entity
project_query = 'Project where full_name is "{0}"'.format(project_name)
self.log.debug("Project query: < {0} >".format(project_query))
project_entity = session.query(project_query).one()
self.log.debug("Project found: {0}".format(project_entity))
# Find asset entity
entity_query = (
'TypedContext where project_id is "{0}"'
' and name is "{1}"'
).format(project_entity["id"], asset_name)
self.log.debug("Asset entity query: < {0} >".format(entity_query))
asset_entity = session.query(entity_query).one()
self.log.debug("Asset found: {0}".format(asset_entity))
# Find task entity if task is set
if task_name:
task_query = (
'Task where name is "{0}" and parent_id is "{1}"'
).format(task_name, asset_entity["id"])
self.log.debug("Task entity query: < {0} >".format(task_query))
task_entity = session.query(task_query).one()
self.log.debug("Task entity found: {0}".format(task_entity))
if task:
result = session.query('Task where\
project.full_name is "{0}" and\
name is "{1}" and\
parent.name is "{2}"'.format(project, task, asset)).one()
context.data["ftrackTask"] = result
else:
result = session.query('TypedContext where\
project.full_name is "{0}" and\
name is "{1}"'.format(project, asset)).one()
context.data["ftrackEntity"] = result
task_entity = None
self.log.warning("Task name is not set.")
self.log.info(result)
context.data["ftrackProject"] = asset_entity
context.data["ftrackEntity"] = asset_entity
context.data["ftrackTask"] = task_entity

View file

@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
info_msg = "Created new {entity_type} with data: {data}"
info_msg += ", metadata: {metadata}."
used_asset_versions = []
# Iterate over components and publish
for data in instance.data.get("ftrackComponentsList", []):
@ -148,6 +149,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
assetversion_cust_attrs = _assetversion_data.pop(
"custom_attributes", {}
)
asset_version_comment = _assetversion_data.pop(
"comment", None
)
assetversion_data.update(_assetversion_data)
assetversion_entity = session.query(
@ -185,6 +189,20 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
# Add comment
if asset_version_comment:
assetversion_entity["comment"] = asset_version_comment
try:
session.commit()
except Exception:
session.rollback()
self.log.warning((
"Comment was not possible to set for AssetVersion"
"\"{0}\". Can't set it's value to: \"{1}\""
).format(
assetversion_entity["id"], str(asset_version_comment)
))
# Adding Custom Attributes
for attr, val in assetversion_cust_attrs.items():
if attr in assetversion_entity["custom_attributes"]:
@ -369,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
tp, value, tb = sys.exc_info()
session.rollback()
six.reraise(tp, value, tb)
if assetversion_entity not in used_asset_versions:
used_asset_versions.append(assetversion_entity)
asset_versions_key = "ftrackIntegratedAssetVersions"
if asset_versions_key not in instance.data:
instance.data[asset_versions_key] = []
for asset_version in used_asset_versions:
if asset_version not in instance.data[asset_versions_key]:
instance.data[asset_versions_key].append(asset_version)

View file

@ -0,0 +1,51 @@
import sys
import pyblish.api
import six
class IntegrateFtrackNote(pyblish.api.InstancePlugin):
"""Create comments in Ftrack."""
# Must be after integrate asset new
order = pyblish.api.IntegratorOrder + 0.4999
label = "Integrate Ftrack note"
families = ["ftrack"]
optional = True
def process(self, instance):
comment = (instance.context.data.get("comment") or "").strip()
if not comment:
self.log.info("Comment is not set.")
return
self.log.debug("Comment is set to {}".format(comment))
asset_versions_key = "ftrackIntegratedAssetVersions"
asset_versions = instance.data.get(asset_versions_key)
if not asset_versions:
self.log.info("There are any integrated AssetVersions")
return
session = instance.context.data["ftrackSession"]
user = session.query(
"User where username is \"{}\"".format(session.api_user)
).first()
if not user:
self.log.warning(
"Was not able to query current User {}".format(
session.api_user
)
)
for asset_version in asset_versions:
asset_version.create_note(comment, author=user)
try:
session.commit()
self.log.debug("Note added to AssetVersion \"{}\"".format(
str(asset_version)
))
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
six.reraise(tp, value, tb)

View file

@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin):
label = 'Clean component data'
families = ["ftrack"]
optional = True
active = True
active = False
def process(self, instance):
for comp in instance.data['representations']:
self.log.debug('component {}'.format(comp))
if "%" in comp['published_path'] or "#" in comp['published_path']:
continue

View file

@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
def process(self, context):
context.data["comment"] = ""
comment = (context.data.get("comment") or "").strip()
context.data["comment"] = comment

View file

@ -0,0 +1,18 @@
"""These data *must* be collected only once during publishing process.
Provides:
context -> datetimeData
"""
import pyblish.api
from pypeapp import config
class CollectDateTimeData(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
label = "Collect DateTime data"
def process(self, context):
key = "datetimeData"
if key not in context.data:
context.data[key] = config.get_datetime_data()

View file

@ -12,7 +12,6 @@ import os
import re
import copy
import json
from pprint import pformat
import pyblish.api
from avalon import api
@ -91,13 +90,22 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "RenderedFrames"
def process(self, context):
pixel_aspect = 1
resolution_width = 1920
resolution_height = 1080
lut_path = None
slate_frame = None
families_data = None
baked_mov_path = None
subset = None
version = None
frame_start = 0
frame_end = 0
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
self.log.info("Collecting paths: {}".format(paths))
@ -123,6 +131,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
cwd = os.path.dirname(path)
root_override = data.get("root")
frame_start = int(data.get("frameStart"))
frame_end = int(data.get("frameEnd"))
subset = data.get("subset")
if root_override:
if os.path.isabs(root_override):
root = root_override
@ -146,13 +158,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
os.environ.update(session)
instance = metadata.get("instance")
if instance:
# here is the place to add ability for nuke noninteractive
# ______________________________________
instance_family = instance.get("family")
pixel_aspect = instance.get("pixelAspect", 1)
resolution_width = instance.get("resolutionWidth", 1920)
resolution_height = instance.get("resolutionHeight", 1080)
lut_path = instance.get("lutPath", None)
baked_mov_path = instance.get("bakeRenderPath")
families_data = instance.get("families")
slate_frame = instance.get("slateFrame")
version = instance.get("version")
else:
# Search in directory
@ -160,35 +175,40 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
root = path
self.log.info("Collecting: {}".format(root))
regex = data.get("regex")
if baked_mov_path:
regex = "^{}.*$".format(subset)
if regex:
self.log.info("Using regex: {}".format(regex))
if "slate" in families_data:
frame_start -= 1
collections, remainder = collect(
root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
frame_start=data.get("frameStart"),
frame_end=data.get("frameEnd"),
frame_start=frame_start,
frame_end=frame_end,
)
self.log.info("Found collections: {}".format(collections))
"""
if data.get("subset"):
# If subset is provided for this json then it must be a single
# collection.
if len(collections) > 1:
self.log.error("Forced subset can only work with a single "
"found sequence")
raise RuntimeError("Invalid sequence")
"""
self.log.info("Found remainder: {}".format(remainder))
fps = data.get("fps", 25)
# adding publish comment and intent to context
context.data["comment"] = data.get("comment", "")
context.data["intent"] = data.get("intent", "")
if data.get("user"):
context.data["user"] = data["user"]
if data.get("version"):
version = data.get("version")
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
@ -197,6 +217,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
families.append("ftrack")
if "write" in instance_family:
families.append("write")
if families_data and "slate" in families_data:
families.append("slate")
if data.get("attachTo"):
# we need to attach found collections to existing
@ -217,11 +239,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": data.get("frameStart"),
"frameEnd": data.get("frameEnd"),
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height
})
if "representations" not in instance.data:
@ -246,31 +270,47 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
instance.data["representations"].append(
representation)
elif data.get("subset"):
elif subset:
# if we have subset - add all collections and known
# remainder as representations
# take out review family if a baked mov path is available
# this will make the image sequence non-review
if baked_mov_path:
self.log.info(
"Baked mov is available {}".format(
baked_mov_path))
families.append("review")
if session['AVALON_APP'] == "maya":
families.append("review")
self.log.info(
"Adding representations to subset {}".format(
data.get("subset")))
subset))
instance = context.create_instance(data.get("subset"))
instance = context.create_instance(subset)
data = copy.deepcopy(data)
instance.data.update(
{
"name": data.get("subset"),
"name": subset,
"family": families[0],
"families": list(families),
"subset": data.get("subset"),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": data.get("frameStart"),
"frameEnd": data.get("frameEnd"),
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"slateFrame": slate_frame,
"version": version
}
)
@ -282,31 +322,53 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
ext = collection.tail.lstrip(".")
if "slate" in instance.data["families"]:
frame_start += 1
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
"tags": ["review"] if not baked_mov_path else [],
}
instance.data["representations"].append(
representation)
# filter out only relevant mov in case baked available
self.log.debug("__ remainder {}".format(remainder))
if baked_mov_path:
remainder = [r for r in remainder
if r in baked_mov_path]
self.log.debug("__ remainder {}".format(remainder))
# process remainders
for rem in remainder:
# add only known types to representation
if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
self.log.info(" . {}".format(rem))
if "slate" in instance.data["families"]:
frame_start += 1
tags = ["review"]
if baked_mov_path:
tags.append("delete")
representation = {
"name": rem.split(".")[-1],
"ext": "{}".format(rem.split(".")[-1]),
"files": rem,
"stagingDir": root,
"frameStart": frame_start,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
"tags": tags
}
instance.data["representations"].append(
representation)
@ -348,6 +410,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"version": version
}
)
if lut_path:
@ -363,9 +428,26 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": start,
"frameEnd": end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
}
instance.data["representations"].append(representation)
# temporary ... allow only beauty on ftrack
if session['AVALON_APP'] == "maya":
AOV_filter = ['beauty']
for aov in AOV_filter:
if aov not in instance.data['subset']:
instance.data['families'].remove('review')
instance.data['families'].remove('ftrack')
representation["tags"].remove('review')
self.log.debug(
"__ representations {}".format(
instance.data["representations"]))
self.log.debug(
"__ instance.data {}".format(instance.data))

View file

@ -31,32 +31,44 @@ class CollectTemplates(pyblish.api.InstancePlugin):
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
@ -66,6 +78,8 @@ class CollectTemplates(pyblish.api.InstancePlugin):
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
else:
hierarchy = ""
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
@ -78,6 +92,21 @@ class CollectTemplates(pyblish.api.InstancePlugin):
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
# Add datetime data to template data
datetime_data = instance.context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data

View file

@ -4,6 +4,7 @@ import copy
import pype.api
import pyblish
from pypeapp import config
class ExtractBurnin(pype.api.Extractor):
@ -15,7 +16,7 @@ class ExtractBurnin(pype.api.Extractor):
`tags` including `burnin`
"""
label = "Quicktime with burnins"
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = ["nuke", "maya", "shell"]
@ -25,11 +26,8 @@ class ExtractBurnin(pype.api.Extractor):
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
# TODO: expand burnin data list to include all usefull keys
version = ''
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])
version = instance.context.data.get(
'version', instance.data.get('version'))
frame_start = int(instance.data.get("frameStart") or 0)
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
@ -41,10 +39,31 @@ class ExtractBurnin(pype.api.Extractor):
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": version,
"comment": instance.context.data.get("comment"),
"intent": instance.context.data.get("intent")
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
datetime_data = instance.context.data.get("datetimeData") or {}
prep_data.update(datetime_data)
slate_frame_start = frame_start
slate_frame_end = frame_end
slate_duration = duration
# exception for slate workflow
if "slate" in instance.data["families"]:
slate_frame_start = frame_start - 1
slate_frame_end = frame_end
slate_duration = slate_frame_end - slate_frame_start + 1
prep_data.update({
"slate_frame_start": slate_frame_start,
"slate_frame_end": slate_frame_end,
"slate_duration": slate_duration
})
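# Worked example (hypothetical values): with frame_start=1001 and
# frame_end=1100 the duration is 100; a "slate" instance extends the
# range to 1000-1100, giving slate_duration=101 (the extra slate frame).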
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
@ -59,26 +78,39 @@ class ExtractBurnin(pype.api.Extractor):
if "burnin" not in repre.get("tags", []):
continue
is_sequence = "sequence" in repre.get("tags", [])
stagingdir = repre["stagingDir"]
filename = "{0}".format(repre["files"])
if is_sequence:
filename = repre["sequence_file"]
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
ext = os.path.splitext(filename)[1]
movieFileBurnin = filename.replace(ext, "") + name + ext
if is_sequence:
fn_splt = filename.split(".")
movieFileBurnin = ".".join(
((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
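# Illustrative example (hypothetical names): "review_h264.mov" becomes
# "review_h264_burnin.mov", and a sequence file "review_h264.%08d.png"
# becomes "review_h264_burnin.%08d.png".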
self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin))
full_movie_path = os.path.join(
os.path.normpath(stagingdir), repre["files"]
)
os.path.normpath(stagingdir), filename)
full_burnin_path = os.path.join(
os.path.normpath(stagingdir), movieFileBurnin
)
os.path.normpath(stagingdir), movieFileBurnin)
self.log.debug("__ full_movie_path: {}".format(full_movie_path))
self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
# create copy of prep_data for anatomy formatting
_prep_data = copy.deepcopy(prep_data)
_prep_data["representation"] = repre["name"]
_prep_data["anatomy"] = (
anatomy.format_all(_prep_data).get("solved") or {}
)
filled_anatomy = anatomy.format_all(_prep_data)
_prep_data["anatomy"] = filled_anatomy.get_solved()
burnin_data = {
"input": full_movie_path.replace("\\", "/"),
"codec": repre.get("codec", []),
@ -125,15 +157,35 @@ class ExtractBurnin(pype.api.Extractor):
self.log.debug("Output: {}".format(output))
repre_update = {
"anatomy_template": "render",
"files": movieFileBurnin,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
if is_sequence:
burnin_seq_files = list()
for frame_index in range(_prep_data["duration"] + 1):
if frame_index == 0:
continue
burnin_seq_files.append(movieFileBurnin % frame_index)
repre_update.update({
"files": burnin_seq_files
})
instance.data["representations"][i].update(repre_update)
# removing the source mov file
os.remove(full_movie_path)
self.log.debug("Removed: `{}`".format(full_movie_path))
if is_sequence:
for frame_index in range(_prep_data["duration"] + 1):
if frame_index == 0:
continue
rm_file = full_movie_path % frame_index
os.remove(rm_file)
self.log.debug("Removed: `{}`".format(rm_file))
else:
os.remove(full_movie_path)
self.log.debug("Removed: `{}`".format(full_movie_path))
# Remove any representations tagged for deletion.
for repre in instance.data["representations"]:

View file

@ -6,7 +6,7 @@ import pype.api
class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
"""Resolve any dependency issues
This plug-in resolves any paths which, if not updated might break
the published file.
@ -20,6 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
hosts = ["shell"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
enabled = False
def process(self, instance):
start = instance.data.get("frameStart")
@ -28,51 +29,74 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
input_file = (
collections[0].format('{head}{padding}{tail}') % start
)
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
self.log.info("subset {}".format(instance.data['subset']))
if 'crypto' in instance.data['subset']:
return
filename = collections[0].format('{head}')
if not filename.endswith('.'):
filename += "."
jpegFile = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpegFile)
# get representation and loop them
representations = instance.data["representations"]
self.log.info("output {}".format(full_output_path))
# filter out mov and img sequences
representations_new = representations[:]
config_data = instance.context.data['output_repre_config']
for repre in representations:
self.log.debug(repre)
if 'review' not in repre['tags']:
return
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
input_file = repre['files'][0]
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start
# )
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
subprocess_jpeg = " ".join(jpeg_items)
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpeg_file = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpeg_file)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
self.log.info("output {}".format(full_output_path))
if "representations" not in instance.data:
instance.data["representations"] = []
config_data = instance.context.data['output_repre_config']
representation = {
'name': 'jpg',
'ext': 'jpg',
'files': jpegFile,
"stagingDir": stagingdir,
"thumbnail": True
}
instance.data["representations"].append(representation)
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
subprocess_jpeg = " ".join(jpeg_items)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
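# Illustrative result (hypothetical paths and profile): the assembled
# command resembles
#   <FFMPEG_PATH>/ffmpeg -y <profile input args> \
#       -i <stagingdir>/render.1001.exr <stagingdir>/render.1001.jpg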
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'thumbnail',
'ext': 'jpg',
'files': jpeg_file,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ['thumbnail']
}
# adding representation
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
instance.data["representations"] = representations_new

View file

@ -32,13 +32,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("frameStart")
resolution_width = instance.data.get("resolutionWidth", to_width)
resolution_height = instance.data.get("resolutionHeight", to_height)
pixel_aspect = instance.data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(instance.data["families"]))
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(inst_data["families"]))
# get representation and loop them
representations = instance.data["representations"]
representations = inst_data["representations"]
# filter out mov and img sequences
representations_new = representations[:]
@ -46,21 +46,39 @@ class ExtractReview(pyblish.api.InstancePlugin):
if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
ext = profile.get("ext", None)
if not ext:
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
@ -81,18 +99,22 @@ class ExtractReview(pyblish.api.InstancePlugin):
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
repre_new = repre.copy()
new_tags = [x for x in tags if x != "delete"]
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# add families
[instance.data["families"].append(t)
for t in p_tags
@ -115,8 +137,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
@ -180,14 +203,19 @@ class ExtractReview(pyblish.api.InstancePlugin):
ffmpet_height = int(
resolution_height * pixel_aspect)
else:
# TODO: it might still be failing in some cases
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(
"-filter:v scale={0}x{1}:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{2})))/2):iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black".format(ffmpet_width, ffmpet_height, lb))
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpet_width, ffmpet_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
@ -195,9 +223,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
# output filename
output_args.append(full_output_path)
self.log.debug("__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug("__ resolution_width: `{}`".format(resolution_width))
self.log.debug("__ resolution_height: `{}`".format(resolution_height))
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
@ -212,22 +245,34 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(resolution_width)
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug("__ width_scale: `{}`".format(width_scale))
self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug("__ height_scale: `{}`".format(height_scale))
self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
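# Worked example (hypothetical values): a 1920x804 source with square
# pixels is wider than 16:9, so width_scale=1920, scale_factor=1.0,
# height_scale=804 and height_half_pad=138, producing
# "scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1".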
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
@ -255,7 +300,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug("_ output_args: `{}`".format(output_args))
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
os.path.join(
@ -279,8 +333,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):

View file

@ -0,0 +1,243 @@
import os
import pype.api
import pyblish
class ExtractReviewSlate(pype.api.Extractor):
"""
Will add slate frame at the start of the video files
"""
label = "Review with Slate frame"
order = pyblish.api.ExtractorOrder + 0.031
families = ["slate"]
hosts = ["nuke", "maya", "shell"]
optional = True
def process(self, instance):
inst_data = instance.data
if "representations" not in inst_data:
raise RuntimeError("Burnin needs already created mov to work on.")
suffix = "_slate"
slate_path = inst_data.get("slateFrame")
ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
to_width = 1920
to_height = 1080
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
ext = os.path.splitext(input_file)[1]
output_file = input_file.replace(ext, "") + suffix + ext
input_path = os.path.join(
os.path.normpath(stagingdir), repre["files"])
self.log.debug("__ input_path: {}".format(input_path))
_remove_at_end.append(input_path)
output_path = os.path.join(
os.path.normpath(stagingdir), output_file)
self.log.debug("__ output_path: {}".format(output_path))
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
# make sure colors are correct
output_args.extend([
"-vf scale=out_color_matrix=bt709",
"-color_primaries bt709",
"-color_trc bt709",
"-colorspace bt709"
])
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
_remove_at_end.append(slate_v_path)
slate_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
slate_subprcs_cmd = " ".join(slate_args)
# run slate generation subprocess
self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd))
slate_output = pype.api.subprocess(slate_subprcs_cmd)
self.log.debug("Slate Output: {}".format(slate_output))
# create ffmpeg concat text file path
conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt"
conc_text_path = os.path.join(
os.path.normpath(stagingdir), conc_text_file)
_remove_at_end.append(conc_text_path)
self.log.debug("__ conc_text_path: {}".format(conc_text_path))
new_line = "\n"
with open(conc_text_path, "w") as conc_text_f:
conc_text_f.writelines([
"file {}".format(
slate_v_path.replace("\\", "/")),
new_line,
"file {}".format(input_path.replace("\\", "/"))
])
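# Illustrative content of the concat list (hypothetical paths), as read
# by ffmpeg's concat demuxer:
#   file /path/to/slate_frame.mov
#   file /path/to/review_h264.mov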
# concat slate and videos together
conc_input_args = ["-y", "-f concat", "-safe 0"]
conc_input_args.append("-i {}".format(conc_text_path))
conc_output_args = ["-c copy"]
conc_output_args.append(output_path)
concat_args = [
ffmpeg_path,
" ".join(conc_input_args),
" ".join(conc_output_args)
]
concat_subprcs_cmd = " ".join(concat_args)
# ffmpeg concat subprocess
self.log.debug("Executing concat: {}".format(concat_subprcs_cmd))
concat_output = pype.api.subprocess(concat_subprcs_cmd)
self.log.debug("Output concat: {}".format(concat_output))
self.log.debug("__ repre[tags]: {}".format(repre["tags"]))
repre_update = {
"files": output_file,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
inst_data["representations"][i].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
# removing temp files
for f in _remove_at_end:
os.remove(f)
self.log.debug("Removed: `{}`".format(f))
# Remove any representations tagged for deletion.
for repre in inst_data.get("representations", []):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
inst_data["representations"].remove(repre)
self.log.debug(inst_data["representations"])
def add_video_filter_args(self, args, inserting_arg):
"""
Fixing video filter arguments to be one long string
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all video format settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip them from all flags
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
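# Illustrative usage (hypothetical arguments): with
#   args = ["-codec prores_ks", "-vf colormatrix=bt601:bt709"]
# and inserting_arg = "scale=1920x1080", the existing "-vf ..." entry is
# removed from args and the helper returns
#   "-vf scale=1920x1080,colormatrix=bt601:bt709"
# ready to be inserted back at the front of the output arguments.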

View file

@ -24,7 +24,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = ["assembly"]
families = []
exclude_families = ["clip"]
def process(self, instance):
@ -84,9 +84,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -94,10 +96,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -318,9 +324,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -82,31 +82,40 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:

View file

@ -7,7 +7,7 @@ import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
from pathlib2 import Path
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
@ -76,8 +76,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"source",
"matchmove",
"image"
"source",
"assembly"
]
exclude_families = ["clip"]
db_representation_context_keys = [
"project", "asset", "task", "subset", "version", "representation",
"family", "hierarchy", "task", "username"
]
def process(self, instance):
@ -153,9 +159,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -163,10 +171,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -175,16 +187,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
# self.log.info("Verifying version from assumed destination")
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# if assumed_version != next_version:
# raise AttributeError("Assumed version 'v{0:03d}' does not match"
# "next version in database "
# "('v{1:03d}')".format(assumed_version,
# next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
@ -270,6 +272,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"version": int(version["name"]),
"hierarchy": hierarchy}
# Add datetime data to template data
datetime_data = context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
@ -279,7 +296,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
anatomy.templates[template_name]["path"])
sequence_repre = isinstance(files, list)
repre_context = None
if sequence_repre:
src_collections, remainder = clique.assemble(files)
self.log.debug(
@ -302,10 +319,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_data["representation"] = repre['ext']
template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(
anatomy_filled[template_name]["path"])
os.path.normpath(template_filled)
)
self.log.debug(
@ -319,19 +338,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = len(str(
repre.get("frameEnd")))
frame_start_padding = anatomy.templates["render"]["padding"]
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
src_padding = src_padding_exp % i
# for adding first frame into db
if not dst_start_frame:
dst_start_frame = src_padding
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
@ -353,11 +371,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
# for adding first frame into db
if not dst_start_frame:
dst_start_frame = dst_padding
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
dst_tail).replace("..", ".")
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
else:
# Single file
@ -381,15 +404,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = os.path.normpath(
anatomy_filled[template_name]["path"]).replace("..", ".")
template_filled = anatomy_filled[template_name]["path"]
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled).replace("..", ".")
instance.data["transfers"].append([src, dst])
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
self.log.debug("__ dst: {}".format(dst))
for key in self.db_representation_context_keys:
value = template_data.get(key)
if not value:
continue
repre_context[key] = template_data[key]
representation = {
"_id": io.ObjectId(),
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
@ -399,21 +430,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
'task': TASK,
"silo": asset.get('silo'),
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": repre['ext']
}
"context": repre_context
}
if repre.get("outputName"):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
@ -429,6 +451,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("__ represNAME: {}".format(rep['name']))
self.log.debug("__ represPATH: {}".format(rep['published_path']))
io.insert_many(representations)
instance.data["published_representations"] = representations
# self.log.debug("Representation: {}".format(representations))
self.log.info("Registered {} items".format(len(representations)))
@ -460,6 +483,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def unc_convert(self, path):
self.log.debug("> __ path: `{}`".format(path))
drive, _path = os.path.splitdrive(path)
self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path))
if not os.path.exists(drive + "/"):
self.log.info("Converting to unc from environments ..")
path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH")
path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
if "/" in path_mount:
path = path.replace(path_mount[0:-1], path_replace)
else:
path = path.replace(path_mount, path_replace)
return path
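# --- Illustrative sketch (not part of the plugin) -------------------------
# A minimal example of what unc_convert is meant to do, assuming a
# hypothetical "P:/" mount and UNC root in the two PYPE_STUDIO_PROJECTS_*
# variables; when the drive letter is not reachable, the mounted prefix is
# swapped for the network prefix. Values below are assumptions, not
# pipeline defaults.
import os

os.environ["PYPE_STUDIO_PROJECTS_MOUNT"] = "P:/"
os.environ["PYPE_STUDIO_PROJECTS_PATH"] = "//studio/projects"

def unc_convert_sketch(path):
    # same replacement logic as unc_convert above, minus the logging
    drive, _ = os.path.splitdrive(path)
    if not os.path.exists(drive + "/"):
        path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH")
        path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
        if "/" in path_mount:
            path = path.replace(path_mount[0:-1], path_replace)
        else:
            path = path.replace(path_mount, path_replace)
    return path

# unc_convert_sketch("P:/proj/sh010/publish/v001/beauty.exr")
# -> "//studio/projects/proj/sh010/publish/v001/beauty.exr"  (P:/ unmounted)
# --------------------------------------------------------------------------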
def copy_file(self, src, dst):
""" Copy given source to destination
@ -469,11 +509,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Returns:
None
"""
src = str(Path(src).resolve())
drive, _path = os.path.splitdrive(dst)
unc = Path(drive).resolve()
dst = str(unc / _path)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
@ -494,8 +531,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
src = Path(src).resolve()
dst = Path(dst).resolve()
src = self.unc_convert(src)
dst = self.unc_convert(dst)
try:
os.makedirs(dirname)
except OSError as e:
@ -508,9 +547,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
@ -601,7 +642,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
"fps": context.data.get(
"fps", instance.data.get("fps"))}
# Include optional data if present in
optionals = [

View file

@ -88,9 +88,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -98,10 +100,14 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -251,9 +257,6 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
@ -332,9 +335,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -0,0 +1,139 @@
import os
import sys
import errno
import shutil
import copy
import six
import pyblish.api
from bson.objectid import ObjectId
from avalon import api, io
class IntegrateThumbnails(pyblish.api.InstancePlugin):
"""Integrate Thumbnails."""
label = "Integrate Thumbnails"
order = pyblish.api.IntegratorOrder + 0.01
families = ["review"]
def process(self, instance):
if not os.environ.get("AVALON_THUMBNAIL_ROOT"):
self.log.info("AVALON_THUMBNAIL_ROOT is not set."
" Skipping thumbnail integration.")
return
published_repres = instance.data.get("published_representations")
if not published_repres:
self.log.debug(
"There are not published representation ids on the instance."
)
return
project_name = api.Session["AVALON_PROJECT"]
anatomy = instance.context.data["anatomy"]
if "publish" not in anatomy.templates:
raise AssertionError("Anatomy does not have set publish key!")
if "thumbnail" not in anatomy.templates["publish"]:
raise AssertionError((
"There is not set \"thumbnail\" template for project \"{}\""
).format(project_name))
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
io.install()
thumb_repre = None
for repre in published_repres:
if repre["name"].lower() == "thumbnail":
thumb_repre = repre
break
if not thumb_repre:
self.log.debug(
"There is not representation with name \"thumbnail\""
)
return
version = io.find_one({"_id": thumb_repre["parent"]})
if not version:
raise AssertionError(
"There does not exist version with id {}".format(
str(thumb_repre["parent"])
)
)
# Get full path to thumbnail file from representation
src_full_path = os.path.normpath(thumb_repre["data"]["path"])
if not os.path.exists(src_full_path):
self.log.warning("Thumbnail file was not found. Path: {}".format(
src_full_path
))
return
filename, file_extension = os.path.splitext(src_full_path)
# Create id for mongo entity now to fill anatomy template
thumbnail_id = ObjectId()
# Prepare anatomy template fill data
template_data = copy.deepcopy(thumb_repre["context"])
template_data.update({
"_id": str(thumbnail_id),
"thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
"ext": file_extension,
"thumbnail_type": "thumbnail"
})
anatomy_filled = anatomy.format(template_data)
final_path = anatomy_filled.get("publish", {}).get("thumbnail")
if not final_path:
raise AssertionError((
"Anatomy template was not filled with entered data"
"\nTemplate: {} "
"\nData: {}"
).format(thumbnail_template, str(template_data)))
dst_full_path = os.path.normpath(final_path)
self.log.debug(
"Copying file .. {} -> {}".format(src_full_path, dst_full_path)
)
dirname = os.path.dirname(dst_full_path)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
tp, value, tb = sys.exc_info()
six.reraise(tp, value, tb)
shutil.copy(src_full_path, dst_full_path)
# Clean template data from keys that are dynamic
template_data.pop("_id")
template_data.pop("thumbnail_root")
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
}
}
# Create thumbnail entity
io.insert_one(thumbnail_entity)
self.log.debug(
"Creating entity in database {}".format(str(thumbnail_entity))
)
# Set thumbnail id for version
io.update_many(
{"_id": version["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.log.debug("Setting thumbnail for version \"{}\" <{}>".format(
version["name"], str(version["_id"])
))
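# --- Illustrative sketch (not part of the plugin) -------------------------
# How the stored template and template_data could later be resolved back to
# a thumbnail path. The template grammar below is a plain str.format()
# stand-in for the real anatomy solver and every key/value is a made-up
# example, not the project's actual configuration.
thumbnail_doc = {
    "type": "thumbnail",
    "data": {
        "template": "{thumbnail_root}/{project}/{asset}/{_id}{ext}",
        "template_data": {"project": "demo", "asset": "sh010", "ext": ".jpg"},
    },
}

fill = dict(thumbnail_doc["data"]["template_data"])
fill["thumbnail_root"] = "/studio/thumbnails"  # AVALON_THUMBNAIL_ROOT
fill["_id"] = "5e3f0c1d"                       # the thumbnail entity id
path = thumbnail_doc["data"]["template"].format(**fill)
# path -> "/studio/thumbnails/demo/sh010/5e3f0c1d.jpg"
# --------------------------------------------------------------------------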

View file

@ -33,14 +33,22 @@ def _get_script():
# Logic to retrieve latest files concerning extendFrames
def get_latest_version(asset_name, subset_name, family):
# Get asset
asset_name = io.find_one({"type": "asset",
"name": asset_name},
projection={"name": True})
asset_name = io.find_one(
{
"type": "asset",
"name": asset_name
},
projection={"name": True}
)
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]},
projection={"_id": True, "name": True})
subset = io.find_one(
{
"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]
},
projection={"_id": True, "name": True}
)
# Check if subsets actually exists (pre-run check)
assert subset, "No subsets found, please publish with `extendFrames` off"
@ -51,11 +59,15 @@ def get_latest_version(asset_name, subset_name, family):
"data.endFrame": True,
"parent": True}
version = io.find_one({"type": "version",
"parent": subset["_id"],
"data.families": family},
projection=version_projection,
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"],
"data.families": family
},
projection=version_projection,
sort=[("name", -1)]
)
assert version, "No version found, this is a bug"
@ -149,7 +161,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_ROOT"
"PYPE_ROOT",
"PYPE_METADATA_FILE",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
def _submit_deadline_post_job(self, instance, job):
@ -160,7 +175,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
data = instance.data.copy()
subset = data["subset"]
state = data.get("publishJobState", "Suspended")
job_name = "{batch} - {subset} [publish image sequence]".format(
batch=job["Props"]["Name"],
subset=subset
@ -172,7 +186,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata_path = os.path.normpath(metadata_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH'])
network_root = os.path.normpath(
os.environ['PYPE_STUDIO_PROJECTS_PATH'])
metadata_path = metadata_path.replace(mount_root, network_root)
@ -186,13 +201,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"InitialStatus": state,
"Priority": job["Props"]["Pri"]
},
"PluginInfo": {
"Version": "3.6",
"ScriptFile": _get_script(),
"Arguments": '--paths "{}"'.format(metadata_path),
"Arguments": "",
"SingleFrameOnly": "True"
},
@ -204,6 +218,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# job so they use the same environment
environment = job["Props"].get("Env", {})
environment["PYPE_METADATA_FILE"] = metadata_path
i = 0
for index, key in enumerate(environment):
self.log.info("KEY: {}".format(key))
@ -241,6 +256,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
# Get a submission job
data = instance.data.copy()
if hasattr(instance, "_log"):
data['_log'] = instance._log
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
@ -318,6 +335,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"source": source,
"user": context.data["user"],
"version": context.data["version"],
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
# Optional metadata (for debugging)
"metadata": {
"instance": data,
@ -326,6 +345,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
}
if api.Session["AVALON_APP"] == "nuke":
metadata['subset'] = subset
if submission_type == "muster":
ftrack = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),

View file

@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin):
host = pyblish.api.current_host()
to_check = context.data["presets"].get(
host, {}).get("ftrack_attributes")
host, {}).get("ftrack_custom_attributes")
if not to_check:
self.log.warning("ftrack_attributes preset not found")
return

View file

@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader):
message = "Are you sure you want import this"
state = QtWidgets.QMessageBox.warning(None,
"Are you sure?",
message,
buttons=buttons,
defaultButton=accept)
"Are you sure?",
message,
buttons=buttons,
defaultButton=accept)
return state == accept

View file

@ -1,62 +0,0 @@
import pype.maya.plugin
import os
from pypeapp import config
class CameraLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the pype.camera family"""
families = ["camera"]
label = "Reference camera"
representations = ["abc", "ma"]
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
# Get family type from the context
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "camera"
cmds.loadPlugin("AbcImport.mll", quiet=True)
groupName = "{}:{}".format(namespace, name)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
cameras = cmds.ls(nodes, type="camera")
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
# Check the Maya version, lockTransform has been introduced since
# Maya 2016.5 Ext 2
version = int(cmds.about(version=True))
if version >= 2016:
for camera in cameras:
cmds.camera(camera, edit=True, lockTransform=True)
else:
self.log.warning("This version of Maya does not support locking of"
" transforms of cameras.")
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -1,54 +0,0 @@
import pype.maya.plugin
import os
from pypeapp import config
class FBXLoader(pype.maya.plugin.ReferenceLoader):
"""Load the FBX"""
families = ["fbx"]
representations = ["fbx"]
label = "Reference FBX"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "fbx"
# Ensure FBX plug-in is loaded
cmds.loadPlugin("fbxmaya", quiet=True)
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
groupName = "{}:{}".format(namespace, name)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -116,9 +116,11 @@ class LookLoader(pype.maya.plugin.ReferenceLoader):
shapes=True))
nodes = set(nodes_list)
json_representation = io.find_one({"type": "representation",
"parent": representation['parent'],
"name": "json"})
json_representation = io.find_one({
"type": "representation",
"parent": representation['parent'],
"name": "json"
})
# Load relationships
shader_relation = api.get_representation_path(json_representation)

View file

@ -1,68 +0,0 @@
import pype.maya.plugin
from pypeapp import config
import os
class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["mayaAscii",
"setdress",
"layout"]
representations = ["ma"]
label = "Reference Maya Ascii"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "model"
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
self[:] = nodes
groupName = "{}:{}".format(namespace, name)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
cmds.setAttr(groupName + ".displayHandle", 1)
# get bounding box
bbox = cmds.exactWorldBoundingBox(groupName)
# get pivot position on world space
pivot = cmds.xform(groupName, q=True, sp=True, ws=True)
# center of bounding box
cx = (bbox[0] + bbox[3]) / 2
cy = (bbox[1] + bbox[4]) / 2
cz = (bbox[2] + bbox[5]) / 2
# add pivot position to calculate offset
cx = cx + pivot[0]
cy = cy + pivot[1]
cz = cz + pivot[2]
# set selection handle offset to center of bounding box
cmds.setAttr(groupName + ".selectHandleX", cx)
cmds.setAttr(groupName + ".selectHandleY", cy)
cmds.setAttr(groupName + ".selectHandleZ", cz)
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -1,4 +1,6 @@
import pype.maya.plugin
from avalon import api, maya
from maya import cmds
import os
from pypeapp import config
@ -6,8 +8,15 @@ from pypeapp import config
class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["model", "pointcache", "animation"]
representations = ["ma", "abc"]
families = ["model",
"pointcache",
"animation",
"mayaAscii",
"setdress",
"layout",
"camera",
"rig"]
representations = ["ma", "abc", "fbx"]
tool_names = ["loader"]
label = "Reference"
@ -37,27 +46,29 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
reference=True,
returnNewNodes=True)
namespace = cmds.referenceQuery(nodes[0], namespace=True)
# namespace = cmds.referenceQuery(nodes[0], namespace=True)
shapes = cmds.ls(nodes, shapes=True, long=True)
print(shapes)
newNodes = (list(set(nodes) - set(shapes)))
print(newNodes)
current_namespace = pm.namespaceInfo(currentNamespace=True)
if current_namespace != ":":
groupName = current_namespace + ":" + groupName
groupNode = pm.PyNode(groupName)
roots = set()
print(nodes)
for node in newNodes:
try:
roots.add(pm.PyNode(node).getAllParents()[-2])
except:
except: # noqa: E722
pass
for root in roots:
root.setParent(world=True)
groupNode.root().zeroTransformPivots()
groupNode.zeroTransformPivots()
for root in roots:
root.setParent(groupNode)
@ -90,23 +101,39 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
cmds.setAttr(groupName + ".selectHandleY", cy)
cmds.setAttr(groupName + ".selectHandleZ", cz)
if data.get("post_process", True):
if family == "rig":
self._post_process_rig(name, namespace, context, data)
return newNodes
def switch(self, container, representation):
self.update(container, representation)
def _post_process_rig(self, name, namespace, context, data):
# for backwards compatibility
class AbcLoader(ReferenceLoader):
label = "Deprecated loader (don't use)"
families = ["pointcache", "animation"]
representations = ["abc"]
tool_names = []
output = next((node for node in self if
node.endswith("out_SET")), None)
controls = next((node for node in self if
node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
# for backwards compatibility
class ModelLoader(ReferenceLoader):
label = "Deprecated loader (don't use)"
families = ["model", "pointcache"]
representations = ["abc"]
tool_names = []
# Find the roots amongst the loaded nodes
roots = cmds.ls(self[:], assemblies=True, long=True)
assert roots, "No root nodes in rig, this is a bug."
asset = api.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
self.log.info("Creating subset: {}".format(namespace))
# Create the animation instance
with maya.maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
api.create(name=namespace,
asset=asset,
family="animation",
options={"useSelection": True},
data={"dependencies": dependency})

View file

@ -1,95 +0,0 @@
from maya import cmds
import pype.maya.plugin
from avalon import api, maya
import os
from pypeapp import config
class RigLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader for rigs
This automatically creates an instance for animators upon load.
"""
families = ["rig"]
representations = ["ma"]
label = "Reference rig"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "rig"
groupName = "{}:{}".format(namespace, name)
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=groupName)
cmds.xform(groupName, pivots=(0, 0, 0))
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
shapes = cmds.ls(nodes, shapes=True, long=True)
print(shapes)
newNodes = (list(set(nodes) - set(shapes)))
print(newNodes)
# Store for post-process
self[:] = newNodes
if data.get("post_process", True):
self._post_process(name, namespace, context, data)
return newNodes
def _post_process(self, name, namespace, context, data):
# TODO(marcus): We are hardcoding the name "out_SET" here.
# Better register this keyword, so that it can be used
# elsewhere, such as in the Integrator plug-in,
# without duplication.
output = next((node for node in self if
node.endswith("out_SET")), None)
controls = next((node for node in self if
node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
# Find the roots amongst the loaded nodes
roots = cmds.ls(self[:], assemblies=True, long=True)
assert roots, "No root nodes in rig, this is a bug."
asset = api.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
# Create the animation instance
with maya.maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
api.create(name=namespace,
asset=asset,
family="animation",
options={"useSelection": True},
data={"dependencies": dependency})
def switch(self, container, representation):
self.update(container, representation)

View file

@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader):
vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name))
mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name))
vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True,
name="{}_VRMM".format(name))
name="{}_VRMM".format(name))
vray_mat_sg = cmds.sets(name="{}_VRSG".format(name),
empty=True,
renderable=True,

View file

@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin):
objsets = instance.data['setMembers']
for objset in objsets:
objset = str(objset)
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if objset == "content_SET":
if "content_SET" in objset:
instance.data['setMembers'] = members
elif objset == "proxy_SET":
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
self.log.debug("data: {}".format(instance.data))

View file

@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
texture_filenames = []
if image_search_paths:
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
image_search_paths.split(os.path.pathsep) if p]
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
image_search_paths = self._replace_tokens(image_search_paths)
# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
self.log.info("Found %i texture(s)" % len(texture_filenames))
@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
"atttribute'" % node)
# Collect all texture files
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
texture_filenames = self._replace_tokens(texture_filenames)
for texture in texture_filenames:
files = []
@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
collection, remainder = clique.assemble(files, patterns=pattern)
return collection
def _replace_tokens(self, strings):
env_re = re.compile(r"\$\{(\w+)\}")
replaced = []
for s in strings:
matches = re.finditer(env_re, s)
for m in matches:
try:
s = s.replace(m.group(), os.environ[m.group(1)])
except KeyError:
msg = "Cannot find requested {} in environment".format(
m.group(1))
self.log.error(msg)
raise RuntimeError(msg)
replaced.append(s)
return replaced
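# --- Illustrative sketch (not part of the plugin) -------------------------
# A standalone example of the ${TOKEN} expansion performed by
# _replace_tokens; the YETI_TEX variable is a made-up example. An unset
# token raises, mirroring the plugin's error path.
import os
import re

def replace_tokens_sketch(strings):
    env_re = re.compile(r"\$\{(\w+)\}")
    replaced = []
    for s in strings:
        for m in env_re.finditer(s):
            # KeyError here corresponds to the RuntimeError raised above
            s = s.replace(m.group(), os.environ[m.group(1)])
        replaced.append(s)
    return replaced

os.environ["YETI_TEX"] = "/mnt/textures"
# replace_tokens_sketch(["${YETI_TEX}/fur/diffuse.tif"])
# -> ["/mnt/textures/fur/diffuse.tif"]
# --------------------------------------------------------------------------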

View file

@ -17,6 +17,7 @@ class ExtractAssStandin(pype.api.Extractor):
label = "Ass Standin (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
@ -47,7 +48,7 @@ class ExtractAssStandin(pype.api.Extractor):
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=self.asciiAss,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
@ -59,13 +60,15 @@ class ExtractAssStandin(pype.api.Extractor):
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
self.log.info("Extracting ass")
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=False,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
self.log.info("Extracted {}".format(filename))
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",

View file

@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor):
def process(self, instance):
parent_dir = self.staging_dir(instance)
staging_dir = self.staging_dir(instance)
hierarchy_filename = "{}.abc".format(instance.name)
hierarchy_path = os.path.join(parent_dir, hierarchy_filename)
hierarchy_path = os.path.join(staging_dir, hierarchy_filename)
json_filename = "{}.json".format(instance.name)
json_path = os.path.join(parent_dir, json_filename)
json_path = os.path.join(staging_dir, json_filename)
self.log.info("Dumping scene data for debugging ..")
with open(json_path, "w") as filepath:
@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor):
"uvWrite": True,
"selection": True})
instance.data["files"] = [json_filename, hierarchy_filename]
if "representations" not in instance.data:
instance.data["representations"] = []
representation_abc = {
'name': 'abc',
'ext': 'abc',
'files': hierarchy_filename,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation_abc)
representation_json = {
'name': 'json',
'ext': 'json',
'files': json_filename,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation_json)
# Remove data
instance.data.pop("scenedata", None)

View file

@ -429,33 +429,42 @@ class ExtractLook(pype.api.Extractor):
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True},
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one(
{"type": "asset", "name": asset_name, "parent": project["_id"]}
)
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one(
{"type": "subset", "name": subset_name, "parent": asset["_id"]}
)
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)]
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version

View file

@ -1,16 +1,14 @@
import os
import glob
import contextlib
import capture_gui
import clique
import capture
#
import pype.maya.lib as lib
import pype.api
#
from maya import cmds, mel
import pymel.core as pm
# import ffmpeg
# # from pype.scripts import otio_burnin
# reload(ffmpeg)
# TODO: move codec settings to presets
@ -93,7 +91,18 @@ class ExtractQuicktime(pype.api.Extractor):
pm.currentTime(refreshFrameInt, edit=True)
with maintained_time():
playblast = capture_gui.lib.capture_scene(preset)
filename = preset.get("filename", "%TEMP%")
# Force viewer to False in call to capture because we have our own
# viewer opening call to allow a signal to trigger between playblast
# and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
preset.pop("panel", None)
path = capture.capture(**preset)
playblast = self._fix_playblast_output_path(path)
self.log.info("file list {}".format(playblast))
@ -119,6 +128,46 @@ class ExtractQuicktime(pype.api.Extractor):
}
instance.data["representations"].append(representation)
def _fix_playblast_output_path(self, filepath):
"""Workaround a bug in maya.cmds.playblast to return correct filepath.
When the `viewer` argument is set to False, maya.cmds.playblast
does not automatically open the playblasted file and the returned
filepath does not have the file's extension added correctly.
To work around this we glob.glob() for any file extension and
assume the most recently modified file is the correct one.
"""
# Catch cancelled playblast
if filepath is None:
self.log.warning("Playblast did not result in output path. "
"Playblast is probably interrupted.")
return None
# Fix: playblast not returning correct filename (with extension)
# Let's assume the most recently modified file is the correct one.
if not os.path.exists(filepath):
directory = os.path.dirname(filepath)
filename = os.path.basename(filepath)
# check if the filepath has a frame-based filename
# example : capture.####.png
parts = filename.split(".")
if len(parts) == 3:
query = os.path.join(directory, "{}.*.{}".format(parts[0],
parts[-1]))
files = glob.glob(query)
else:
files = glob.glob("{}.*".format(filepath))
if not files:
raise RuntimeError("Couldn't find playblast from: "
"{0}".format(filepath))
filepath = max(files, key=os.path.getmtime)
return filepath
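# --- Illustrative sketch (not part of the extractor) ----------------------
# A quick check of the glob fallback above, under assumed file names: when
# playblast writes "capture.0001.png" but reports only ".../capture", the
# most recently modified matching file is taken as the output.
import glob
import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "capture.0001.png"), "w").close()

reported = os.path.join(tmp, "capture")          # what playblast returned
candidates = glob.glob("{}.*".format(reported))  # same fallback as above
latest = max(candidates, key=os.path.getmtime)
# latest -> ".../capture.0001.png"
# --------------------------------------------------------------------------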
@contextlib.contextmanager
def maintained_time():

View file

@ -1,31 +1,14 @@
import os
import contextlib
import time
import sys
import glob
import capture_gui
import clique
import capture
import pype.maya.lib as lib
import pype.api
from maya import cmds
import pymel.core as pm
# import ffmpeg
# reload(ffmpeg)
import avalon.maya
# import maya_utils as mu
# from tweakHUD import master
# from tweakHUD import draft_hud as dHUD
# from tweakHUD import ftrackStrings as fStrings
#
# def soundOffsetFunc(oSF, SF, H):
# tmOff = (oSF - H) - SF
# return tmOff
class ExtractThumbnail(pype.api.Extractor):
@ -47,39 +30,8 @@ class ExtractThumbnail(pype.api.Extractor):
end = cmds.currentTime(query=True)
self.log.info("start: {}, end: {}".format(start, end))
members = instance.data['setMembers']
camera = instance.data['review_camera']
# project_code = ftrack_data['Project']['code']
# task_type = ftrack_data['Task']['type']
#
# # load Preset
# studio_repos = os.path.abspath(os.environ.get('studio_repos'))
# shot_preset_path = os.path.join(studio_repos, 'maya',
# 'capture_gui_presets',
# (project_code + '_' + task_type + '_' + asset + '.json'))
#
# task_preset_path = os.path.join(studio_repos, 'maya',
# 'capture_gui_presets',
# (project_code + '_' + task_type + '.json'))
#
# project_preset_path = os.path.join(studio_repos, 'maya',
# 'capture_gui_presets',
# (project_code + '.json'))
#
# default_preset_path = os.path.join(studio_repos, 'maya',
# 'capture_gui_presets',
# 'default.json')
#
# if os.path.isfile(shot_preset_path):
# preset_to_use = shot_preset_path
# elif os.path.isfile(task_preset_path):
# preset_to_use = task_preset_path
# elif os.path.isfile(project_preset_path):
# preset_to_use = project_preset_path
# else:
# preset_to_use = default_preset_path
capture_preset = ""
capture_preset = instance.context.data['presets']['maya']['capture']
try:
@ -126,7 +78,18 @@ class ExtractThumbnail(pype.api.Extractor):
pm.currentTime(refreshFrameInt, edit=True)
with maintained_time():
playblast = capture_gui.lib.capture_scene(preset)
filename = preset.get("filename", "%TEMP%")
# Force viewer to False in call to capture because we have our own
# viewer opening call to allow a signal to trigger between
# playblast and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
preset.pop("panel", None)
path = capture.capture(**preset)
playblast = self._fix_playblast_output_path(path)
_, thumbnail = os.path.split(playblast)
@ -144,6 +107,45 @@ class ExtractThumbnail(pype.api.Extractor):
}
instance.data["representations"].append(representation)
def _fix_playblast_output_path(self, filepath):
"""Workaround a bug in maya.cmds.playblast to return correct filepath.
When the `viewer` argument is set to False, maya.cmds.playblast
does not automatically open the playblasted file and the returned
filepath does not have the file's extension added correctly.
To work around this we glob.glob() for any file extension and
assume the most recently modified file is the correct one.
"""
# Catch cancelled playblast
if filepath is None:
self.log.warning("Playblast did not result in output path. "
"Playblast is probably interrupted.")
return None
# Fix: playblast not returning correct filename (with extension)
# Let's assume the most recently modified file is the correct one.
if not os.path.exists(filepath):
directory = os.path.dirname(filepath)
filename = os.path.basename(filepath)
# check if the filepath has a frame-based filename
# example : capture.####.png
parts = filename.split(".")
if len(parts) == 3:
query = os.path.join(directory, "{}.*.{}".format(parts[0],
parts[-1]))
files = glob.glob(query)
else:
files = glob.glob("{}.*".format(filepath))
if not files:
raise RuntimeError("Couldn't find playblast from: "
"{0}".format(filepath))
filepath = max(files, key=os.path.getmtime)
return filepath
@contextlib.contextmanager
def maintained_time():

View file

@ -228,80 +228,19 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AuxFiles": []
}
# Include critical environment variables with submission
# We need those to pass them to pype for it to set correct context
keys = [
# This will trigger `userSetup.py` on the slave
# such that proper initialisation happens the same
# way as it does on a local machine.
# TODO(marcus): This won't work if the slaves don't
# have access to these paths, such as if slaves are
# running Linux and the submitter is on Windows.
"PYTHONPATH",
"PATH",
"MTOA_EXTENSIONS_PATH",
"MTOA_EXTENSIONS",
"DYLD_LIBRARY_PATH",
"MAYA_RENDER_DESC_PATH",
"MAYA_MODULE_PATH",
"ARNOLD_PLUGIN_PATH",
"AVALON_SCHEMA",
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"PYBLISHPLUGINPATH",
# todo: This is a temporary fix for yeti variables
"PEREGRINEL_LICENSE",
"SOLIDANGLE_LICENSE",
"ARNOLD_LICENSE"
"MAYA_MODULE_PATH",
"TOOL_ENV"
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"PYPE_USERNAME"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
# self.log.debug("enviro: {}".format(pprint(environment)))
for path in os.environ:
if path.lower().startswith('pype_'):
environment[path] = os.environ[path]
environment["PATH"] = os.environ["PATH"]
# self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS']))
clean_environment = {}
for key in environment:
clean_path = ""
self.log.debug("key: {}".format(key))
self.log.debug("value: {}".format(environment[key]))
to_process = str(environment[key])
if key == "PYPE_STUDIO_CORE_MOUNT":
clean_path = to_process
elif "://" in to_process:
clean_path = to_process
elif os.pathsep not in str(to_process):
try:
path = to_process
path.decode('UTF-8', 'strict')
clean_path = os.path.normpath(path)
except UnicodeDecodeError:
print('path contains non UTF characters')
else:
for path in to_process.split(os.pathsep):
try:
path.decode('UTF-8', 'strict')
clean_path += os.path.normpath(path) + os.pathsep
except UnicodeDecodeError:
print('path contains non UTF characters')
if key == "PYTHONPATH":
clean_path = clean_path.replace('python2', 'python3')
clean_path = clean_path.replace(
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
clean_environment[key] = clean_path
environment = clean_environment
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
@ -319,7 +258,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.preflight_check(instance)
self.log.info("Submitting..")
self.log.info("Submitting ...")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs

View file

@ -38,9 +38,13 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin):
invalid = list()
asset = instance.data['asset']
asset_data = io.find_one({"name": asset,
"type": "asset"},
projection={"_id": True})
asset_data = io.find_one(
{
"name": asset,
"type": "asset"
},
projection={"_id": True}
)
asset_id = str(asset_data['_id'])
# We do want to check the referenced nodes as it might be

View file

@ -49,9 +49,10 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin):
"""Check if subset is registered in the database under the asset"""
asset = io.find_one({"type": "asset", "name": asset_name})
is_valid = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
is_valid = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
return is_valid

View file

@ -1,24 +0,0 @@
import pyblish.api
class WriteToRender(pyblish.api.InstancePlugin):
"""Swith Render knob on write instance to on,
so next time publish will be set to render
"""
order = pyblish.api.ExtractorOrder + 0.1
label = "Write to render next"
optional = True
hosts = ["nuke", "nukeassist"]
families = ["write"]
def process(self, instance):
return
if [f for f in instance.data["families"]
if ".frames" in f]:
instance[0]["render"].setValue(True)
self.log.info("Swith write node render to `on`")
else:
# swith to
instance[0]["render"].setValue(False)
self.log.info("Swith write node render to `Off`")

View file

@ -1,14 +0,0 @@
import pyblish.api
import nuke
class CollectActiveViewer(pyblish.api.ContextPlugin):
"""Collect any active viewer from nodes
"""
order = pyblish.api.CollectorOrder + 0.3
label = "Collect Active Viewer"
hosts = ["nuke"]
def process(self, context):
context.data["ActiveViewer"] = nuke.activeViewer()

View file

@ -1,22 +0,0 @@
import pyblish
class ExtractFramesToIntegrate(pyblish.api.InstancePlugin):
"""Extract rendered frames for integrator
"""
order = pyblish.api.ExtractorOrder
label = "Extract rendered frames"
hosts = ["nuke"]
families = ["render"]
def process(self, instance):
return
# staging_dir = instance.data.get('stagingDir', None)
# output_dir = instance.data.get('outputDir', None)
#
# if not staging_dir:
# staging_dir = output_dir
# instance.data['stagingDir'] = staging_dir
# # instance.data['transfer'] = False

View file

@ -1,116 +0,0 @@
import os
import nuke
import pyblish.api
class Extract(pyblish.api.InstancePlugin):
"""Super class for write and writegeo extractors."""
order = pyblish.api.ExtractorOrder
optional = True
label = "Extract Nuke [super]"
hosts = ["nuke"]
match = pyblish.api.Subset
# targets = ["process.local"]
def execute(self, instance):
# Get frame range
node = instance[0]
first_frame = nuke.root()["first_frame"].value()
last_frame = nuke.root()["last_frame"].value()
if node["use_limit"].value():
first_frame = node["first"].value()
last_frame = node["last"].value()
# Render frames
nuke.execute(node.name(), int(first_frame), int(last_frame))
class ExtractNukeWrite(Extract):
""" Extract output from write nodes. """
families = ["write", "local"]
label = "Extract Write"
def process(self, instance):
self.execute(instance)
# Validate output
for filename in list(instance.data["collection"]):
if not os.path.exists(filename):
instance.data["collection"].remove(filename)
self.log.warning("\"{0}\" didn't render.".format(filename))
class ExtractNukeCache(Extract):
label = "Cache"
families = ["cache", "local"]
def process(self, instance):
self.execute(instance)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeCamera(Extract):
label = "Camera"
families = ["camera", "local"]
def process(self, instance):
node = instance[0]
node["writeGeometries"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeGeometries"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeGeometry(Extract):
label = "Geometry"
families = ["geometry", "local"]
def process(self, instance):
node = instance[0]
node["writeCameras"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeCameras"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg

View file

@ -1,40 +0,0 @@
import pyblish.api
import os
import pype
import shutil
class ExtractScript(pype.api.Extractor):
"""Publish script
"""
label = 'Extract Script'
order = pyblish.api.ExtractorOrder - 0.05
optional = True
hosts = ['nuke']
families = ["workfile"]
def process(self, instance):
self.log.debug("instance extracting: {}".format(instance.data))
current_script = instance.context.data["currentFile"]
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}".format(instance.data["name"])
path = os.path.join(stagingdir, filename)
self.log.info("Performing extraction..")
shutil.copy(current_script, path)
if "representations" not in instance.data:
instance.data["representations"] = list()
representation = {
'name': 'nk',
'ext': '.nk',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -1,27 +0,0 @@
import pyblish.api
import shutil
import os
class CopyStagingDir(pyblish.api.InstancePlugin):
"""Copy data rendered into temp local directory
"""
order = pyblish.api.IntegratorOrder - 2
label = "Copy data from temp dir"
hosts = ["nuke", "nukeassist"]
families = ["render.local"]
def process(self, instance):
temp_dir = instance.data.get("stagingDir")
output_dir = instance.data.get("outputDir")
# copy data to correct dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.log.info("output dir has been created")
for f in os.listdir(temp_dir):
self.log.info("copy file to correct destination: {}".format(f))
shutil.copy(os.path.join(temp_dir, os.path.basename(f)),
os.path.join(output_dir, os.path.basename(f)))

View file

@ -1,98 +0,0 @@
import re
import os
import json
import subprocess
import pyblish.api
from pype.action import get_errored_plugins_from_data
def _get_script():
"""Get path to the image sequence script"""
# todo: use a more elegant way to get the python script
try:
from pype.fusion.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_imagesequence'"
"to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
class PublishImageSequence(pyblish.api.InstancePlugin):
"""Publish the generated local image sequences."""
order = pyblish.api.IntegratorOrder
label = "Publish Rendered Image Sequence(s)"
hosts = ["fusion"]
families = ["saver.renderlocal"]
def process(self, instance):
# Skip this plug-in if the ExtractImageSequence failed
errored_plugins = get_errored_plugins_from_data(instance.context)
if any(plugin.__name__ == "FusionRenderLocal" for plugin in
errored_plugins):
raise RuntimeError("Fusion local render failed, "
"publishing images skipped.")
subset = instance.data["subset"]
ext = instance.data["ext"]
# Regex to match resulting renders
regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset),
ext=re.escape(ext))
# The instance has most of the information already stored
metadata = {
"regex": regex,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}
# Write metadata and store the path in the instance
output_directory = instance.data["outputDir"]
path = os.path.join(output_directory,
"{}_metadata.json".format(subset))
with open(path, "w") as f:
json.dump(metadata, f)
assert os.path.isfile(path), ("Stored path is not a file for %s"
% instance.data["name"])
# Suppress any subprocess console
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
process = subprocess.Popen(["python", _get_script(),
"--paths", path],
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
while True:
output = process.stdout.readline()
# Break when there is no output or a return code has been given
if output == '' and process.poll() is not None:
process.stdout.close()
break
if output:
line = output.strip()
if line.startswith("ERROR"):
self.log.error(line)
else:
self.log.info(line)
if process.returncode != 0:
raise RuntimeError("Process quit with non-zero "
"return code: {}".format(process.returncode))

View file

@ -1,24 +0,0 @@
import pyblish.api
import nuke
class ValidateActiveViewer(pyblish.api.ContextPlugin):
"""Validate presentse of the active viewer from nodes
"""
order = pyblish.api.ValidatorOrder
label = "Validate Active Viewer"
hosts = ["nuke"]
def process(self, context):
viewer_process_node = context.data.get("ViewerProcess")
assert viewer_process_node, (
"Missing active viewer process! Please click on output write node and push key number 1-9"
)
active_viewer = context.data["ActiveViewer"]
active_input = active_viewer.activeInput()
assert active_input is not None, (
"Missing active viewer input! Please click on output write node and push key number 1-9"
)

View file

@ -1,36 +0,0 @@
import os
import pyblish.api
import pype.utils
@pyblish.api.log
class RepairNukeWriteNodeVersionAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
import pype.nuke.lib as nukelib
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
node = instance[0]
render_path = nukelib.get_render_path(node)
self.log.info("render_path: {}".format(render_path))
node['file'].setValue(render_path.replace("\\", "/"))
class ValidateVersionMatch(pyblish.api.InstancePlugin):
"""Checks if write version matches workfile version"""
label = "Validate Version Match"
order = pyblish.api.ValidatorOrder
actions = [RepairNukeWriteNodeVersionAction]
hosts = ["nuke"]
families = ['write']
def process(self, instance):
assert instance.data['version'] == instance.context.data['version'], "\
Version in write doesn't match version of the workfile"

View file

@ -1,59 +0,0 @@
import pyblish.api
import pype.api
import pype.nuke.actions
class RepairWriteFamiliesAction(pyblish.api.Action):
label = "Fix Write's render attributes"
on = "failed"
icon = "wrench"
def process(self, instance, plugin):
self.log.info("instance {}".format(instance))
instance["render"].setValue(True)
self.log.info("Rendering toggled ON")
@pyblish.api.log
class ValidateWriteFamilies(pyblish.api.InstancePlugin):
""" Validates write families. """
order = pyblish.api.ValidatorOrder
label = "Valitade writes families"
hosts = ["nuke"]
families = ["write"]
actions = [pype.nuke.actions.SelectInvalidAction, pype.api.RepairAction]
@staticmethod
def get_invalid(self, instance):
if not [f for f in instance.data["families"]
if ".frames" in f]:
return
if not instance.data.get('files'):
return (instance)
def process(self, instance):
self.log.debug('instance.data["files"]: {}'.format(instance.data['files']))
invalid = self.get_invalid(self, instance)
if invalid:
raise ValueError(str("`{}`: Switch `Render` on! "
"> {}".format(__name__, invalid)))
# if any(".frames" in f for f in instance.data["families"]):
# if not instance.data["files"]:
# raise ValueError("instance {} is set to publish frames\
# but no files were collected, render the frames first or\
# check 'render' checkbox onthe no to 'ON'".format(instance)))
#
#
# self.log.info("Checked correct writes families")
@classmethod
def repair(cls, instance):
cls.log.info("instance {}".format(instance))
instance[0]["render"].setValue(True)
cls.log.info("Rendering toggled ON")

View file

@ -35,8 +35,10 @@ class CreateBackdrop(Creator):
return instance
else:
nuke.message("Please select nodes you "
"wish to add to a container")
msg = "Please select nodes you "
"wish to add to a container"
self.log.error(msg)
nuke.message(msg)
return
else:
bckd_node = autoBackdrop()

View file

@ -36,8 +36,10 @@ class CreateGizmo(Creator):
node["tile_color"].setValue(int(self.node_color, 16))
return anlib.imprint(node, self.data)
else:
nuke.message("Please select a group node "
"you wish to publish as the gizmo")
msg = ("Please select a group node "
"you wish to publish as the gizmo")
self.log.error(msg)
nuke.message(msg)
if len(nodes) >= 2:
anlib.select_nodes(nodes)
@ -58,8 +60,10 @@ class CreateGizmo(Creator):
return anlib.imprint(gizmo_node, self.data)
else:
nuke.message("Please select nodes you "
"wish to add to the gizmo")
msg = ("Please select nodes you "
"wish to add to the gizmo")
self.log.error(msg)
nuke.message(msg)
return
else:
with anlib.maintained_selection():

View file

@ -34,7 +34,9 @@ class CrateRead(avalon.nuke.Creator):
nodes = self.nodes
if not nodes or len(nodes) == 0:
nuke.message('Please select Read node')
msg = "Please select Read node"
self.log.error(msg)
nuke.message(msg)
else:
count_reads = 0
for node in nodes:
@ -46,7 +48,9 @@ class CrateRead(avalon.nuke.Creator):
count_reads += 1
if count_reads < 1:
nuke.message('Please select Read node')
msg = "Please select Read node"
self.log.error(msg)
nuke.message(msg)
return
def change_read_node(self, name, node, data):

View file

@ -1,8 +0,0 @@
# create publishable read node usually used for enabling version tracking
# also useful for sharing across shots or assets
# if read nodes are selected it will convert them to centainer
# if no read node selected it will create read node and offer browser to shot resource folder
# type movie > mov or imagesequence
# type still > matpaint .psd, .tif, .png,

View file

@ -1,18 +1,14 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
from pype import api as pype
from pype.nuke import plugin
from pypeapp import config
import nuke
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
nClass = "write"
n_class = "write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
@ -23,7 +19,7 @@ class CreateWriteRender(plugin.PypeCreator):
data = OrderedDict()
data["family"] = self.family
data["families"] = self.nClass
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
@ -31,7 +27,7 @@ class CreateWriteRender(plugin.PypeCreator):
self.data = data
self.nodes = nuke.selectedNodes()
self.log.info("self.data: '{}'".format(self.data))
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
@ -45,7 +41,11 @@ class CreateWriteRender(plugin.PypeCreator):
if (self.options or {}).get("useSelection"):
nodes = self.nodes
assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`")
if len(nodes) > 1:
msg = ("Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
selected_node = nodes[0]
inputs = [selected_node]
@ -66,7 +66,7 @@ class CreateWriteRender(plugin.PypeCreator):
# recreate new
write_data = {
"class": self.nClass,
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
@ -97,75 +97,121 @@ class CreateWriteRender(plugin.PypeCreator):
return write_node
#
# class CreateWritePrerender(avalon.nuke.Creator):
# # change this to template preset
# preset = "prerender"
#
# name = "WritePrerender"
# label = "Create Write Prerender"
# hosts = ["nuke"]
# family = "{}_write".format(preset)
# families = preset
# icon = "sign-out"
# defaults = ["Main", "Mask"]
#
# def __init__(self, *args, **kwargs):
# super(CreateWritePrerender, self).__init__(*args, **kwargs)
# self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
# self.__class__.__name__, {}
# )
#
# data = OrderedDict()
#
# data["family"] = self.family.split("_")[1]
# data["families"] = self.families
#
# {data.update({k: v}) for k, v in self.data.items()
# if k not in data.keys()}
# self.data = data
#
# def process(self):
# self.name = self.data["subset"]
#
# instance = nuke.toNode(self.data["subset"])
# node = 'write'
#
# if not instance:
# write_data = {
# "class": node,
# "preset": self.preset,
# "avalon": self.data
# }
#
# if self.presets.get('fpath_template'):
# self.log.info("Adding template path from preset")
# write_data.update(
# {"fpath_template": self.presets["fpath_template"]}
# )
# else:
# self.log.info("Adding template path from plugin")
# write_data.update({
# "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
#
# # get group node
# group_node = create_write_node(self.data["subset"], write_data)
#
# # open group node
# group_node.begin()
# for n in nuke.allNodes():
# # get write node
# if n.Class() in "Write":
# write_node = n
# group_node.end()
#
# # linking knobs to group property panel
# linking_knobs = ["first", "last", "use_limit"]
# for k in linking_knobs:
# lnk = nuke.Link_Knob(k)
# lnk.makeLink(write_node.name(), k)
# lnk.setName(k.replace('_', ' ').capitalize())
# lnk.clearFlag(nuke.STARTLINE)
# group_node.addKnob(lnk)
#
# return
class CreateWritePrerender(plugin.PypeCreator):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
n_class = "write"
family = "prerender"
icon = "sign-out"
defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"]
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
if not (len(nodes) < 2):
msg = ("Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
self.log.error(msg)
nuke.message(msg)
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node,
prenodes=[])
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
# open group node
write_node.begin()
for n in nuke.allNodes():
# get write node
if n.Class() in "Write":
w_node = n
write_node.end()
# add inner write node Tab
write_node.addKnob(nuke.Tab_Knob("WriteLinkedKnobs"))
# linking knobs to group property panel
linking_knobs = ["channels", "___", "first", "last", "use_limit"]
for k in linking_knobs:
if "___" in k:
write_node.addKnob(nuke.Text_Knob(''))
else:
lnk = nuke.Link_Knob(k)
lnk.makeLink(w_node.name(), k)
lnk.setName(k.replace('_', ' ').capitalize())
lnk.clearFlag(nuke.STARTLINE)
write_node.addKnob(lnk)
return write_node

View file

@ -0,0 +1,322 @@
from avalon import api, style, io
import nuke
import nukescripts
from pype.nuke import lib as pnlib
from avalon.nuke import lib as anlib
from avalon.nuke import containerise, update_container
reload(pnlib)
class LoadBackdropNodes(api.Loader):
"""Loading Published Backdrop nodes (workfile, nukenodes)"""
representations = ["nk"]
families = ["workfile", "nukenodes"]
label = "Iport Nuke Nodes"
order = 0
icon = "eye"
color = style.colors.light
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
"""
Loading function to import .nk file into script and wrap
it on backdrop
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# Get mouse position
n = nuke.createNode("NoOp")
xcursor, ycursor = (n.xpos(), n.ypos())
anlib.reset_selection()
nuke.delete(n)
bdn_frame = 50
with anlib.maintained_selection():
# add group from nk
nuke.nodePaste(file)
# get all pasted nodes
new_nodes = list()
nodes = nuke.selectedNodes()
# get pointer position in DAG
xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame)
# reset position to all nodes and replace inputs and output
for n in nodes:
anlib.reset_selection()
xpos = (n.xpos() - xcursor) + xpointer
ypos = (n.ypos() - ycursor) + ypointer
n.setXYpos(xpos, ypos)
# replace Input nodes for dots
if n.Class() in "Input":
dot = nuke.createNode("Dot")
new_name = n.name().replace("INP", "DOT")
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = n.dependent()
for d in dep:
index = next((i for i, dpcy in enumerate(
d.dependencies())
if n is dpcy), 0)
d.setInput(index, dot)
# remove Input node
anlib.reset_selection()
nuke.delete(n)
continue
# replace Input nodes for dots
elif n.Class() in "Output":
dot = nuke.createNode("Dot")
new_name = n.name() + "_DOT"
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = next((d for d in n.dependencies()), None)
if dep:
dot.setInput(0, dep)
# remove Input node
anlib.reset_selection()
nuke.delete(n)
continue
else:
new_nodes.append(n)
# reselect nodes with new Dot instead of Inputs and Output
anlib.reset_selection()
anlib.select_nodes(new_nodes)
# place on backdrop
bdn = nukescripts.autoBackdrop()
# add frame offset
xpos = bdn.xpos() - bdn_frame
ypos = bdn.ypos() - bdn_frame
bdwidth = bdn["bdwidth"].value() + (bdn_frame*2)
bdheight = bdn["bdheight"].value() + (bdn_frame*2)
bdn["xpos"].setValue(xpos)
bdn["ypos"].setValue(ypos)
bdn["bdwidth"].setValue(bdwidth)
bdn["bdheight"].setValue(bdheight)
bdn["name"].setValue(object_name)
bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name))
bdn["note_font_size"].setValue(20)
return containerise(
node=bdn,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
context = representation["context"]
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with anlib.maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = anlib.get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
anlib.set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd88467ff", 16))
else:
GN["tile_color"].setValue(int(self.node_color, 16))
self.log.info("udated to version: {}".format(version.get("name")))
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
Finds the active viewer, places the node under it
and adds the group name to the Input Process
of the viewer
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = ("Please create Viewer node before you "
    "run this action again")
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
pnlib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Converts unicode strings to str
It goes recursively through the whole dictionary
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -176,8 +176,10 @@ class LoadGizmoInputProcess(api.Loader):
if len(viewer) > 0:
viewer = viewer[0]
else:
self.log.error("Please create Viewer node before you "
"run this action again")
msg = "Please create Viewer node before you "
"run this action again"
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1

View file

@ -276,7 +276,10 @@ class LoadLutsInputProcess(api.Loader):
if len(viewer) > 0:
viewer = viewer[0]
else:
self.log.error("Please create Viewer node before you run this action again")
msg = "Please create Viewer node before you "
"run this action again"
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1

View file

@ -1,4 +1,5 @@
from avalon import api
import nuke
class MatchmoveLoader(api.Loader):
@ -19,6 +20,8 @@ class MatchmoveLoader(api.Loader):
exec(open(self.fname).read())
else:
self.log.error("Unsupported script type")
msg = "Unsupported script type"
self.log.error(msg)
nuke.message(msg)
return True

View file

@ -1,11 +1,10 @@
import re
import nuke
import contextlib
from avalon import api, io
import nuke
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
from pype.nuke import presets
from pypeapp import config
@contextlib.contextmanager
@ -24,7 +23,7 @@ def preserve_trim(node):
offset_frame = None
if node['frame_mode'].value() == "start at":
start_at_frame = node['frame'].value()
if node['frame_mode'].value() is "offset":
if node['frame_mode'].value() == "offset":
offset_frame = node['frame'].value()
try:
@ -33,14 +32,14 @@ def preserve_trim(node):
if start_at_frame:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(script_start))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
if offset_frame:
node['frame_mode'].setValue("offset")
node['frame'].setValue(str((script_start + offset_frame)))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
def loader_shift(node, frame, relative=True):
@ -69,11 +68,37 @@ def loader_shift(node, frame, relative=True):
return int(script_start)
def add_review_presets_config():
returning = {
"families": list(),
"representations": list()
}
review_presets = config.get_presets()["plugins"]["global"]["publish"].get(
"ExtractReview", {})
outputs = review_presets.get("outputs", {})
#
for output, properties in outputs.items():
returning["representations"].append(output)
returning["families"] += properties.get("families", [])
return returning
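# Illustration only (the preset names below are hypothetical, not taken
# from this diff): given ExtractReview outputs such as
#   {"h264": {"families": ["review"]}, "prores": {"families": ["render"]}}
# add_review_presets_config() returns
#   {"families": ["review", "render"], "representations": ["h264", "prores"]}
# which LoadMov appends to its `families` and `representations` below.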
class LoadMov(api.Loader):
"""Load mov file into Nuke"""
presets = add_review_presets_config()
families = [
"source",
"plate",
"render",
"review"] + presets["families"]
families = ["write", "source", "plate", "render", "review"]
representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"]
representations = [
"mov",
"preview",
"review",
"mp4"] + presets["representations"]
label = "Load mov"
order = -10
@ -85,47 +110,48 @@ class LoadMov(api.Loader):
containerise,
viewer_update_and_undo_stop
)
version = context['version']
version_data = version.get("data", {})
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
orig_first = version_data.get("frameStart")
orig_last = version_data.get("frameEnd")
diff = orig_first - 1
# set first to 1
first = orig_first - diff
last = orig_last - diff
handles = version_data.get("handles", None)
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
repr_cont = context["representation"]["context"]
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
handle_end = handles
handle_start = version_data.get("handleStart")
handle_end = version_data.get("handleEnd")
colorspace = version_data.get("colorspace")
repr_cont = context["representation"]["context"]
# create handles offset (only to last, because of mov)
last += handle_start + handle_end
# offset should be with handles so it match orig frame range
offset_frame = orig_first + handle_start
offset_frame = orig_first - handle_start
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
file = self.fname
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
# TODO: it might be universal read to img/geo/camera
read_node = nuke.createNode(
"Read",
"name {}".format(read_name)
@ -139,7 +165,23 @@ class LoadMov(api.Loader):
read_node["last"].setValue(last)
read_node["frame_mode"].setValue("start at")
read_node["frame"].setValue(str(offset_frame))
# add additional metadata from the version to imprint to Avalon knob
if colorspace:
read_node["colorspace"].setValue(str(colorspace))
# load nuke presets for Read's colorspace
read_clrs_presets = presets.get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace preset for read is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
if preset_clrsp is not None:
read_node["colorspace"].setValue(str(preset_clrsp))
# add additional metadata from the version to imprint Avalon knob
add_keys = [
"frameStart", "frameEnd", "handles", "source", "author",
"fps", "version", "handleStart", "handleEnd"
@ -147,7 +189,7 @@ class LoadMov(api.Loader):
data_imprint = {}
for key in add_keys:
if key is 'version':
if key == 'version':
data_imprint.update({
key: context["version"]['name']
})
@ -186,10 +228,18 @@ class LoadMov(api.Loader):
)
node = nuke.toNode(container['objectName'])
# TODO: prepare also for other Read img/geo/camera
assert node.Class() == "Read", "Must be Read"
file = api.get_representation_path(representation)
file = self.fname
if not file:
repr_id = representation["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
# Get start frame from version data
version = io.find_one({
@ -207,20 +257,23 @@ class LoadMov(api.Loader):
version_data = version.get("data", {})
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
orig_first = version_data.get("frameStart")
orig_last = version_data.get("frameEnd")
diff = orig_first - 1
# set first to 1
first = orig_first - diff
last = orig_last - diff
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
colorspace = version_data.get("colorspace")
if first is None:
log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
first = 0
# fix handle start and end if none are available
@ -231,12 +284,12 @@ class LoadMov(api.Loader):
# create handles offset (only to last, because of mov)
last += handle_start + handle_end
# offset should be with handles so it match orig frame range
offset_frame = orig_first + handle_start
offset_frame = orig_first - handle_start
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])
log.info("__ node['file']: {}".format(node["file"].value()))
node["file"].setValue(file)
self.log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
@ -247,19 +300,34 @@ class LoadMov(api.Loader):
node["frame_mode"].setValue("start at")
node["frame"].setValue(str(offset_frame))
if colorspace:
node["colorspace"].setValue(str(colorspace))
# load nuke presets for Read's colorspace
read_clrs_presets = presets.get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace preset for read is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
if preset_clrsp is not None:
node["colorspace"].setValue(str(preset_clrsp))
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"frameStart": str(first),
"frameEnd": str(last),
"version": str(version.get("name")),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
"handles": version_data.get("handles"),
"handleStart": version_data.get("handleStart"),
"handleEnd": version_data.get("handleEnd"),
"fps": version_data.get("fps"),
"handleStart": str(handle_start),
"handleEnd": str(handle_end),
"fps": str(version_data.get("fps")),
"author": version_data.get("author"),
"outputDir": version_data.get("outputDir"),
"outputDir": version_data.get("outputDir")
})
# change color of node
@ -272,7 +340,7 @@ class LoadMov(api.Loader):
update_container(
node, updated_dict
)
log.info("udated to version: {}".format(version.get("name")))
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):

View file

@ -1,11 +1,9 @@
import re
import nuke
import contextlib
from avalon import api, io
import nuke
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
from pype.nuke import presets
@contextlib.contextmanager
@ -24,7 +22,7 @@ def preserve_trim(node):
offset_frame = None
if node['frame_mode'].value() == "start at":
start_at_frame = node['frame'].value()
if node['frame_mode'].value() is "offset":
if node['frame_mode'].value() == "offset":
offset_frame = node['frame'].value()
try:
@ -33,14 +31,14 @@ def preserve_trim(node):
if start_at_frame:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(script_start))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
if offset_frame:
node['frame_mode'].setValue("offset")
node['frame'].setValue(str((script_start + offset_frame)))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
def loader_shift(node, frame, relative=True):
@ -72,8 +70,8 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["write", "source", "plate", "render"]
representations = ["exr", "dpx", "jpg", "jpeg"]
families = ["render2d", "source", "plate", "render"]
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
label = "Load sequence"
order = -10
@ -89,11 +87,10 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
log.info("version_data: {}\n".format(version_data))
self.log.info("version_data: {}\n".format(version_data))
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
first = version_data.get("frameStart", None)
@ -106,21 +103,27 @@ class LoadSequence(api.Loader):
first -= self.handle_start
last += self.handle_end
file = self.fname.replace("\\", "/")
file = self.fname
log.info("file: {}\n".format(self.fname))
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
repr_cont = context["representation"]["context"]
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
# TODO: it might be universal read to img/geo/camera
@ -130,24 +133,36 @@ class LoadSequence(api.Loader):
r["file"].setValue(file)
# Set colorspace defined in version data
colorspace = context["version"]["data"].get("colorspace", None)
if colorspace is not None:
colorspace = context["version"]["data"].get("colorspace")
if colorspace:
r["colorspace"].setValue(str(colorspace))
# load nuke presets for Read's colorspace
read_clrs_presets = presets.get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace preset for read is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
if preset_clrsp is not None:
r["colorspace"].setValue(str(preset_clrsp))
loader_shift(r, first, relative=True)
r["origfirst"].setValue(int(first))
r["first"].setValue(int(first))
r["origlast"].setValue(int(last))
r["last"].setValue(int(last))
# add additional metadata from the version to imprint to Avalon knob
# add additional metadata from the version to imprint Avalon knob
add_keys = ["frameStart", "frameEnd",
"source", "colorspace", "author", "fps", "version",
"handleStart", "handleEnd"]
data_imprint = {}
for k in add_keys:
if k is 'version':
if k == 'version':
data_imprint.update({k: context["version"]['name']})
else:
data_imprint.update(
@ -179,7 +194,7 @@ class LoadSequence(api.Loader):
rtn["after"].setValue("continue")
rtn["input.first_lock"].setValue(True)
rtn["input.first"].setValue(
self.handle_start + self.first_frame
)
if time_warp_nodes != []:
@ -210,16 +225,29 @@ class LoadSequence(api.Loader):
"""
from avalon.nuke import (
ls_img_sequence,
update_container
)
node = nuke.toNode(container['objectName'])
# TODO: prepare also for other Read img/geo/camera
assert node.Class() == "Read", "Must be Read"
path = api.get_representation_path(representation)
file = ls_img_sequence(path)
repr_cont = representation["context"]
file = self.fname
if not file:
repr_id = representation["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
# Get start frame from version data
version = io.find_one({
@ -241,13 +269,14 @@ class LoadSequence(api.Loader):
self.handle_start = version_data.get("handleStart", 0)
self.handle_end = version_data.get("handleEnd", 0)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
first = version_data.get("frameStart")
last = version_data.get("frameEnd")
if first is None:
log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
first = 0
first -= self.handle_start
@ -255,8 +284,8 @@ class LoadSequence(api.Loader):
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])
log.info("__ node['file']: {}".format(node["file"].value()))
node["file"].setValue(file)
self.log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
@ -268,14 +297,14 @@ class LoadSequence(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"frameStart": str(first),
"frameEnd": str(last),
"version": str(version.get("name")),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
"handleStart": version_data.get("handleStart"),
"handleEnd": version_data.get("handleEnd"),
"fps": version_data.get("fps"),
"handleStart": str(self.handle_start),
"handleEnd": str(self.handle_end),
"fps": str(version_data.get("fps")),
"author": version_data.get("author"),
"outputDir": version_data.get("outputDir"),
})
@ -296,7 +325,7 @@ class LoadSequence(api.Loader):
node,
updated_dict
)
log.info("udated to version: {}".format(version.get("name")))
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):

View file

@ -13,8 +13,10 @@ class CollectAssetInfo(pyblish.api.ContextPlugin):
]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.info("asset_data: {}".format(asset_data))
context.data['handles'] = int(asset_data["data"].get("handles", 0))

View file

@ -15,9 +15,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
hosts = ["nuke", "nukeassist"]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []
@ -27,12 +28,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
for node in nuke.allNodes():
if node.Class() in ["Viewer", "Dot"]:
continue
try:
if node["disable"].value():
continue
except Exception as E:
self.log.warning(E)
# get data from avalon knob
self.log.debug("node[name]: {}".format(node['name'].value()))

View file

@ -1,12 +1,12 @@
import pyblish.api
import nuke
class CollectReview(pyblish.api.InstancePlugin):
"""Collect review instance from rendered frames
"""
order = pyblish.api.CollectorOrder + 0.3
family = "review"
label = "Collect Review"
hosts = ["nuke"]
families = ["render", "render.local", "render.farm"]
@ -25,4 +25,6 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data["families"].append("review")
instance.data['families'].append('ftrack')
self.log.info("Review collected: `{}`".format(instance))
self.log.debug("__ instance.data: `{}`".format(instance.data))

View file

@ -0,0 +1,22 @@
import os
import pype.api as pype
import pyblish.api
class CollectScriptVersion(pyblish.api.ContextPlugin):
"""Collect Script Version."""
order = pyblish.api.CollectorOrder
label = "Collect Script Version"
hosts = [
"nuke",
"nukeassist"
]
def process(self, context):
file_path = context.data["currentFile"]
base_name = os.path.basename(file_path)
# get version string
version = pype.get_version_from_path(base_name)
context.data['version'] = version

View file

@ -0,0 +1,40 @@
import pyblish.api
import nuke
class CollectSlate(pyblish.api.InstancePlugin):
"""Check if SLATE node is in scene and connected to rendering tree"""
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["write"]
def process(self, instance):
node = instance[0]
slate = next((n for n in nuke.allNodes()
if "slate" in n.name().lower()
if not n["disable"].getValue()),
None)
if slate:
# check if slate node is connected to write node tree
slate_check = 0
slate_node = None
while slate_check == 0:
try:
node = node.dependencies()[0]
if slate.name() in node.name():
slate_node = node
slate_check = 1
except IndexError:
break
if slate_node:
instance.data["slateNode"] = slate_node
instance.data["families"].append("slate")
self.log.info(
"Slate node is in node graph: `{}`".format(slate.name()))
self.log.debug(
"__ instance: `{}`".format(instance))

View file

@ -2,8 +2,6 @@ import nuke
import pyblish.api
import os
import pype.api as pype
from avalon.nuke import (
get_avalon_knob_data,
add_publish_knob
@ -11,7 +9,7 @@ from avalon.nuke import (
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Publish current script version."""
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
@ -31,9 +29,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
base_name = os.path.basename(file_path)
subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family)
# get version string
version = pype.get_version_from_path(base_name)
# Get frame range
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
@ -53,7 +48,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"resolutionWidth": resolution_width,
@ -78,8 +72,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"publish": root.knob('publish').value(),
"family": family,
"families": [family],
"representations": list(),
"subsetGroup": "workfiles"
"representations": list()
})
# adding basic script data

View file

@ -14,6 +14,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
families = ["write"]
def process(self, instance):
# adding 2d focused rendering
instance.data["families"].append("render2d")
node = None
for x in instance:
@ -50,9 +52,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version
version = pype.get_version_from_path(nuke.root().name())
instance.data['version'] = version
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data['version'])
# create label
@ -94,12 +97,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(version),
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": [instance.data["family"]],
"families": ["render"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
group_node = [x for x in instance if x.Class() == "Group"][0]
deadlineChunkSize = 1
@ -125,9 +129,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"families": families,
"colorspace": node["colorspace"].value(),
"deadlineChunkSize": deadlineChunkSize,
"deadlinePriority": deadlinePriority,
"subsetGroup": "renders"
"deadlinePriority": deadlinePriority
})
self.log.debug("instance.data: {}".format(instance.data))

View file

@ -28,6 +28,11 @@ class NukeRenderLocal(pype.api.Extractor):
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("frameStart", None)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame -= 1
last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
@ -47,6 +52,10 @@ class NukeRenderLocal(pype.api.Extractor):
int(last_frame)
)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame += 1
path = node['file'].value()
out_dir = os.path.dirname(path)
ext = node["file_type"].value()

View file

@ -41,7 +41,7 @@ class ExtractReviewDataLut(pype.api.Extractor):
with anlib.maintained_selection():
exporter = pnlib.ExporterReviewLut(
self, instance
)
data = exporter.generate_lut()
# assign to representations

View file

@ -3,7 +3,6 @@ import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import pype
reload(pnlib)
class ExtractReviewDataMov(pype.api.Extractor):
@ -16,23 +15,20 @@ class ExtractReviewDataMov(pype.api.Extractor):
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data Mov"
families = ["review"]
families = ["review", "render", "render.local"]
hosts = ["nuke"]
def process(self, instance):
families = instance.data["families"]
self.log.info("Creating staging dir...")
if "representations" in instance.data:
staging_dir = instance.data[
"representations"][0]["stagingDir"].replace("\\", "/")
instance.data["stagingDir"] = staging_dir
instance.data["representations"][0]["tags"] = []
else:
instance.data["representations"] = []
# get output path
render_path = instance.data['path']
staging_dir = os.path.normpath(os.path.dirname(render_path))
instance.data["stagingDir"] = staging_dir
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
@ -46,6 +42,15 @@ class ExtractReviewDataMov(pype.api.Extractor):
instance.data["families"].remove("review")
instance.data["families"].remove("ftrack")
data = exporter.generate_mov(farm=True)
self.log.debug(
"_ data: {}".format(data))
instance.data.update({
"bakeRenderPath": data.get("bakeRenderPath"),
"bakeScriptPath": data.get("bakeScriptPath"),
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
data = exporter.generate_mov()

Some files were not shown because too many files have changed in this diff