Merge branch 'develop' into bugfix/ffmpeg_path_burnin_fix

Jakub Jezek committed 2020-01-22 12:11:53 +01:00
commit eff28c1cb9
111 changed files with 9969 additions and 1950 deletions

pype/blender/__init__.py (new file, +34)

@@ -0,0 +1,34 @@
import logging
from pathlib import Path
import os
import bpy
from avalon import api as avalon
from pyblish import api as pyblish
from .plugin import AssetLoader
logger = logging.getLogger("pype.blender")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")
def install():
"""Install Blender configuration for Avalon."""
pyblish.register_plugin_path(str(PUBLISH_PATH))
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
def uninstall():
"""Uninstall Blender configuration for Avalon."""
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
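
For orientation, this host module is meant to be handed to avalon's host installer; a minimal sketch of the expected bootstrap, assuming the standard `avalon.api.install` flow (the call site itself is not part of this commit):

# Hypothetical bootstrap, e.g. run from Blender startup scripts.
from avalon import api
import pype.blender

# api.install() calls the host module's install(), which registers
# the publish/load/create plugin paths defined above.
api.install(pype.blender)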

pype/blender/action.py (new file, +47)

@@ -0,0 +1,47 @@
import bpy
import pyblish.api
from ..action import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doens't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]
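
A publish plug-in opts into this action through pyblish's `actions` attribute and must provide the `get_invalid` classmethod the action calls. A hypothetical validator illustrating the wiring (not part of this commit; the naming rule is made up):

import pyblish.api
from pype.blender.action import SelectInvalidAction

class ValidateObjectNames(pyblish.api.InstancePlugin):
    """Illustrative validator only."""
    order = pyblish.api.ValidatorOrder
    hosts = ["blender"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        # Return the objects violating the (made-up) naming rule.
        return [obj for obj in instance if " " in obj.name]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError(
                "Objects with spaces in their names: {}".format(
                    [obj.name for obj in invalid]
                )
            )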

pype/blender/plugin.py (new file, +135)

@@ -0,0 +1,135 @@
"""Shared functionality for pipeline plugins for Blender."""
from pathlib import Path
from typing import Dict, List, Optional
import bpy
from avalon import api
VALID_EXTENSIONS = [".blend"]
def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
"""Return a consistent name for a model asset."""
name = f"{asset}_{subset}"
if namespace:
name = f"{namespace}:{name}"
return name
class AssetLoader(api.Loader):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
into another Blender scene.
The `update` method should be implemented by a sub-class, because
it's different for different types (e.g. model, rig, animation,
etc.).
"""
@staticmethod
def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
"""Get the 'instance empty' that holds the collection instance."""
for node in nodes:
if not isinstance(node, bpy.types.Object):
continue
if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
and node.instance_collection and node.name == instance_name):
return node
return None
@staticmethod
def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
"""Get the 'instance collection' (container) for this asset."""
for node in nodes:
if not isinstance(node, bpy.types.Collection):
continue
if node.name == instance_name:
return node
return None
@staticmethod
def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
"""Find the library file from the container.
        It traverses the objects of this collection, checks that they all
        come from exactly one library, and returns that library.
Warning:
No nested collections are supported at the moment!
"""
assert not container.children, "Nested collections are not supported."
assert container.objects, "The collection doesn't contain any objects."
libraries = set()
for obj in container.objects:
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
        assert len(libraries) == 1, (
            f"'{container.name}' contains objects from more than one library."
        )
return list(libraries)[0]
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
context: Full parenthood of representation to load
name: Use pre-defined name
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO (jasper): make it possible to add the asset several times by
# just re-using the collection
assert Path(self.fname).exists(), f"{self.fname} doesn't exist."
self.process_asset(
context=context,
name=name,
namespace=namespace,
options=options,
)
# Only containerise if anything was loaded by the Loader.
nodes = self[:]
if not nodes:
return None
# Only containerise if it's not already a collection from a .blend file.
representation = context["representation"]["name"]
if representation != "blend":
from avalon.blender.pipeline import containerise
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__,
)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
instance_name = model_name(asset, subset, namespace)
return self._get_instance_collection(instance_name, nodes)
def update(self, container: Dict, representation: Dict):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")

@@ -1,354 +1,606 @@
import os
import sys
import logging
import collections
import uuid
from datetime import datetime
from queue import Queue
from bson.objectid import ObjectId
import argparse
import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
class DeleteAsset(BaseAction):
class DeleteAssetSubset(BaseAction):
    '''Delete asset/subset action.'''
#: Action identifier.
identifier = 'delete.asset'
identifier = "delete.asset.subset"
#: Action label.
label = 'Delete Asset/Subsets'
label = "Delete Asset/Subsets"
#: Action description.
description = 'Removes from Avalon with all childs and asset from Ftrack'
icon = '{}/ftrack/action_icons/DeleteAsset.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
description = "Removes from Avalon with all childs and asset from Ftrack"
icon = "{}/ftrack/action_icons/DeleteAsset.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
#: Db
db = DbConnector()
role_list = ["Pypeclub", "Administrator", "Project Manager"]
#: Db connection
dbcon = DbConnector()
value = None
splitter = {"type": "label", "value": "---"}
action_data_by_id = {}
asset_prefix = "asset:"
subset_prefix = "subset:"
def discover(self, session, entities, event):
''' Validation '''
if len(entities) != 1:
return False
""" Validation """
task_ids = []
for ent_info in event["data"]["selection"]:
entType = ent_info.get("entityType", "")
if entType == "task":
task_ids.append(ent_info["entityId"])
valid = ["task"]
entityType = event["data"]["selection"][0].get("entityType", "")
if entityType.lower() not in valid:
return False
return True
for entity in entities:
ftrack_id = entity["id"]
if ftrack_id not in task_ids:
continue
if entity.entity_type.lower() != "task":
return True
return False
def _launch(self, event):
self.reset_session()
try:
self.db.install()
args = self._translate_event(
self.session, event
)
if "values" not in event["data"]:
self.dbcon.install()
return self._interface(self.session, *args)
interface = self._interface(
self.session, *args
)
confirmation = self.confirm_delete(
True, *args
)
if interface:
return interface
confirmation = self.confirm_delete(*args)
if confirmation:
return confirmation
self.dbcon.install()
response = self.launch(
self.session, *args
)
finally:
self.db.uninstall()
self.dbcon.uninstall()
return self._handle_result(
self.session, response, *args
)
def interface(self, session, entities, event):
if not event['data'].get('values', {}):
self.attempt = 1
items = []
entity = entities[0]
title = 'Choose items to delete from "{}"'.format(entity['name'])
project = entity['project']
self.show_message(event, "Preparing data...", True)
items = []
title = "Choose items to delete"
self.db.Session['AVALON_PROJECT'] = project["full_name"]
# Filter selection and get ftrack ids
selection = event["data"].get("selection") or []
ftrack_ids = []
project_in_selection = False
for entity in selection:
entity_type = (entity.get("entityType") or "").lower()
if entity_type != "task":
if entity_type == "show":
project_in_selection = True
continue
av_entity = self.db.find_one({
'type': 'asset',
'name': entity['name']
ftrack_id = entity.get("entityId")
if not ftrack_id:
continue
ftrack_ids.append(ftrack_id)
if project_in_selection:
msg = "It is not possible to use this action on project entity."
self.show_message(event, msg, True)
# Filter event even more (skip task entities)
# - task entities are not relevant for avalon
for entity in entities:
ftrack_id = entity["id"]
if ftrack_id not in ftrack_ids:
continue
if entity.entity_type.lower() == "task":
ftrack_ids.remove(ftrack_id)
if not ftrack_ids:
            # It is a bug if this happens!
return {
"success": False,
"message": "Invalid selection for this action (Bug)"
}
if entities[0].entity_type.lower() == "project":
project = entities[0]
else:
project = entities[0]["project"]
project_name = project["full_name"]
self.dbcon.Session["AVALON_PROJECT"] = project_name
selected_av_entities = self.dbcon.find({
"type": "asset",
"data.ftrackId": {"$in": ftrack_ids}
})
selected_av_entities = [ent for ent in selected_av_entities]
if not selected_av_entities:
return {
"success": False,
"message": "Didn't found entities in avalon"
}
        # Remove cached actions older than 2 minutes
        old_action_ids = []
        for id, data in self.action_data_by_id.items():
            created_at = data.get("created_at")
            if not created_at:
                old_action_ids.append(id)
                continue
            cur_time = datetime.now()
            existing_in_sec = (cur_time - created_at).total_seconds()
            if existing_in_sec > 60 * 2:
                old_action_ids.append(id)

        for id in old_action_ids:
            self.action_data_by_id.pop(id, None)
# Store data for action id
action_id = str(uuid.uuid1())
self.action_data_by_id[action_id] = {
"attempt": 1,
"created_at": datetime.now(),
"project_name": project_name,
"subset_ids_by_name": {},
"subset_ids_by_parent": {}
}
id_item = {
"type": "hidden",
"name": "action_id",
"value": action_id
}
items.append(id_item)
asset_ids = [ent["_id"] for ent in selected_av_entities]
subsets_for_selection = self.dbcon.find({
"type": "subset",
"parent": {"$in": asset_ids}
})
asset_ending = ""
if len(selected_av_entities) > 1:
asset_ending = "s"
asset_title = {
"type": "label",
"value": "# Delete asset{}:".format(asset_ending)
}
asset_note = {
"type": "label",
"value": (
"<p><i>NOTE: Action will delete checked entities"
" in Ftrack and Avalon with all children entities and"
" published content.</i></p>"
)
}
items.append(asset_title)
items.append(asset_note)
asset_items = collections.defaultdict(list)
for asset in selected_av_entities:
ent_path_items = [project_name]
ent_path_items.extend(asset.get("data", {}).get("parents") or [])
ent_path_to_parent = "/".join(ent_path_items) + "/"
asset_items[ent_path_to_parent].append(asset)
for asset_parent_path, assets in sorted(asset_items.items()):
items.append({
"type": "label",
"value": "## <b>- {}</b>".format(asset_parent_path)
})
if av_entity is None:
return {
'success': False,
'message': 'Didn\'t found assets in avalon'
}
asset_label = {
'type': 'label',
'value': '## Delete whole asset: ##'
}
asset_item = {
'label': av_entity['name'],
'name': 'whole_asset',
'type': 'boolean',
'value': False
}
splitter = {
'type': 'label',
'value': '{}'.format(200*"-")
}
subset_label = {
'type': 'label',
'value': '## Subsets: ##'
}
if av_entity is not None:
items.append(asset_label)
items.append(asset_item)
items.append(splitter)
all_subsets = self.db.find({
'type': 'subset',
'parent': av_entity['_id']
for asset in assets:
items.append({
"label": asset["name"],
"name": "{}{}".format(
self.asset_prefix, str(asset["_id"])
),
"type": 'boolean',
"value": False
})
subset_items = []
for subset in all_subsets:
item = {
'label': subset['name'],
'name': str(subset['_id']),
'type': 'boolean',
'value': False
}
subset_items.append(item)
if len(subset_items) > 0:
items.append(subset_label)
items.extend(subset_items)
else:
return {
'success': False,
'message': 'Didn\'t found assets in avalon'
}
subset_ids_by_name = collections.defaultdict(list)
subset_ids_by_parent = collections.defaultdict(list)
for subset in subsets_for_selection:
subset_id = subset["_id"]
name = subset["name"]
parent_id = subset["parent"]
subset_ids_by_name[name].append(subset_id)
subset_ids_by_parent[parent_id].append(subset_id)
if not subset_ids_by_name:
return {
'items': items,
'title': title
"items": items,
"title": title
}
def confirm_delete(self, first_attempt, entities, event):
if first_attempt is True:
if 'values' not in event['data']:
return
subset_ending = ""
if len(subset_ids_by_name.keys()) > 1:
subset_ending = "s"
values = event['data']['values']
subset_title = {
"type": "label",
"value": "# Subset{} to delete:".format(subset_ending)
}
subset_note = {
"type": "label",
"value": (
"<p><i>WARNING: Subset{} will be removed"
" for all <b>selected</b> entities.</i></p>"
).format(subset_ending)
}
if len(values) <= 0:
return
if 'whole_asset' not in values:
return
else:
values = self.values
items.append(self.splitter)
items.append(subset_title)
items.append(subset_note)
title = 'Confirmation of deleting {}'
if values['whole_asset'] is True:
title = title.format(
'whole asset {}'.format(
entities[0]['name']
)
)
else:
subsets = []
for key, value in values.items():
if value is True:
subsets.append(key)
len_subsets = len(subsets)
if len_subsets == 0:
for name in subset_ids_by_name:
items.append({
"label": "<b>{}</b>".format(name),
"name": "{}{}".format(self.subset_prefix, name),
"type": "boolean",
"value": False
})
self.action_data_by_id[action_id]["subset_ids_by_parent"] = (
subset_ids_by_parent
)
self.action_data_by_id[action_id]["subset_ids_by_name"] = (
subset_ids_by_name
)
return {
"items": items,
"title": title
}
def confirm_delete(self, entities, event):
values = event["data"]["values"]
action_id = values.get("action_id")
spec_data = self.action_data_by_id.get(action_id)
if not spec_data:
# it is a bug if this happens!
return {
"success": False,
"message": "Something bad has happened. Please try again."
}
# Process Delete confirmation
delete_key = values.get("delete_key")
if delete_key:
delete_key = delete_key.lower().strip()
# Go to launch part if user entered `delete`
if delete_key == "delete":
return
# Skip whole process if user didn't enter any text
elif delete_key == "":
self.action_data_by_id.pop(action_id, None)
return {
'success': True,
'message': 'Nothing was selected to delete'
"success": True,
"message": "Deleting cancelled (delete entry was empty)"
}
elif len_subsets == 1:
title = title.format(
'{} subset'.format(len_subsets)
)
else:
title = title.format(
'{} subsets'.format(len_subsets)
)
# Get data to show again
to_delete = spec_data["to_delete"]
else:
to_delete = collections.defaultdict(list)
for key, value in values.items():
if not value:
continue
if key.startswith(self.asset_prefix):
_key = key.replace(self.asset_prefix, "")
to_delete["assets"].append(_key)
elif key.startswith(self.subset_prefix):
_key = key.replace(self.subset_prefix, "")
to_delete["subsets"].append(_key)
self.action_data_by_id[action_id]["to_delete"] = to_delete
asset_to_delete = len(to_delete.get("assets") or []) > 0
subset_to_delete = len(to_delete.get("subsets") or []) > 0
if not asset_to_delete and not subset_to_delete:
self.action_data_by_id.pop(action_id, None)
return {
"success": True,
"message": "Nothing was selected to delete"
}
attempt = spec_data["attempt"]
if attempt > 3:
self.action_data_by_id.pop(action_id, None)
return {
"success": False,
"message": "You didn't enter \"DELETE\" properly 3 times!"
}
self.action_data_by_id[action_id]["attempt"] += 1
title = "Confirmation of deleting"
if asset_to_delete:
asset_len = len(to_delete["assets"])
asset_ending = ""
if asset_len > 1:
asset_ending = "s"
title += " {} Asset{}".format(asset_len, asset_ending)
if subset_to_delete:
title += " and"
if subset_to_delete:
sub_len = len(to_delete["subsets"])
type_ending = ""
sub_ending = ""
if sub_len == 1:
subset_ids_by_name = spec_data["subset_ids_by_name"]
if len(subset_ids_by_name[to_delete["subsets"][0]]) > 1:
sub_ending = "s"
elif sub_len > 1:
type_ending = "s"
sub_ending = "s"
title += " {} type{} of subset{}".format(
sub_len, type_ending, sub_ending
)
self.values = values
items = []
id_item = {"type": "hidden", "name": "action_id", "value": action_id}
delete_label = {
'type': 'label',
'value': '# Please enter "DELETE" to confirm #'
}
delete_item = {
'name': 'delete_key',
'type': 'text',
'value': '',
'empty_text': 'Type Delete here...'
"name": "delete_key",
"type": "text",
"value": "",
"empty_text": "Type Delete here..."
}
items.append(id_item)
items.append(delete_label)
items.append(delete_item)
return {
'items': items,
'title': title
"items": items,
"title": title
}
def launch(self, session, entities, event):
if 'values' not in event['data']:
return
values = event['data']['values']
if len(values) <= 0:
return
if 'delete_key' not in values:
return
if values['delete_key'].lower() != 'delete':
if values['delete_key'].lower() == '':
return {
'success': False,
'message': 'Deleting cancelled'
}
if self.attempt < 3:
self.attempt += 1
return_dict = self.confirm_delete(False, entities, event)
return_dict['title'] = '{} ({} attempt)'.format(
return_dict['title'], self.attempt
)
return return_dict
self.show_message(event, "Processing...", True)
values = event["data"]["values"]
action_id = values.get("action_id")
spec_data = self.action_data_by_id.get(action_id)
if not spec_data:
# it is a bug if this happens!
return {
'success': False,
'message': 'You didn\'t enter "DELETE" properly 3 times!'
"success": False,
"message": "Something bad has happened. Please try again."
}
entity = entities[0]
project = entity['project']
report_messages = collections.defaultdict(list)
self.db.Session['AVALON_PROJECT'] = project["full_name"]
project_name = spec_data["project_name"]
to_delete = spec_data["to_delete"]
self.dbcon.Session["AVALON_PROJECT"] = project_name
all_ids = []
if self.values.get('whole_asset', False) is True:
av_entity = self.db.find_one({
'type': 'asset',
'name': entity['name']
assets_to_delete = to_delete.get("assets") or []
subsets_to_delete = to_delete.get("subsets") or []
        # Convert asset ids to ObjectId objects
assets_to_delete = [ObjectId(id) for id in assets_to_delete if id]
subset_ids_by_parent = spec_data["subset_ids_by_parent"]
subset_ids_by_name = spec_data["subset_ids_by_name"]
subset_ids_to_archive = []
asset_ids_to_archive = []
ftrack_ids_to_delete = []
if len(assets_to_delete) > 0:
# Prepare data when deleting whole avalon asset
avalon_assets = self.dbcon.find({"type": "asset"})
avalon_assets_by_parent = collections.defaultdict(list)
for asset in avalon_assets:
parent_id = asset["data"]["visualParent"]
avalon_assets_by_parent[parent_id].append(asset)
if asset["_id"] in assets_to_delete:
ftrack_id = asset["data"]["ftrackId"]
ftrack_ids_to_delete.append(ftrack_id)
children_queue = Queue()
for mongo_id in assets_to_delete:
children_queue.put(mongo_id)
while not children_queue.empty():
mongo_id = children_queue.get()
if mongo_id in asset_ids_to_archive:
continue
asset_ids_to_archive.append(mongo_id)
for subset_id in subset_ids_by_parent.get(mongo_id, []):
if subset_id not in subset_ids_to_archive:
subset_ids_to_archive.append(subset_id)
children = avalon_assets_by_parent.get(mongo_id)
if not children:
continue
for child in children:
child_id = child["_id"]
if child_id not in asset_ids_to_archive:
children_queue.put(child_id)
# Prepare names of assets in ftrack and ids of subsets in mongo
asset_names_to_delete = []
if len(subsets_to_delete) > 0:
for name in subsets_to_delete:
asset_names_to_delete.append(name)
for subset_id in subset_ids_by_name[name]:
if subset_id in subset_ids_to_archive:
continue
subset_ids_to_archive.append(subset_id)
        # Get ftrack ids of entities where only the asset will be deleted
not_deleted_entities_id = []
ftrack_id_name_map = {}
if asset_names_to_delete:
for entity in entities:
ftrack_id = entity["id"]
ftrack_id_name_map[ftrack_id] = entity["name"]
if ftrack_id in ftrack_ids_to_delete:
continue
not_deleted_entities_id.append(ftrack_id)
mongo_proc_txt = "MongoProcessing: "
ftrack_proc_txt = "Ftrack processing: "
if asset_ids_to_archive:
self.log.debug("{}Archivation of assets <{}>".format(
mongo_proc_txt,
", ".join([str(id) for id in asset_ids_to_archive])
))
self.dbcon.update_many(
{
"_id": {"$in": asset_ids_to_archive},
"type": "asset"
},
{"$set": {"type": "archived_asset"}}
)
if subset_ids_to_archive:
self.log.debug("{}Archivation of subsets <{}>".format(
mongo_proc_txt,
", ".join([str(id) for id in subset_ids_to_archive])
))
self.dbcon.update_many(
{
"_id": {"$in": subset_ids_to_archive},
"type": "subset"
},
{"$set": {"type": "archived_subset"}}
)
if ftrack_ids_to_delete:
self.log.debug("{}Deleting Ftrack Entities <{}>".format(
ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
))
joined_ids_to_delete = ", ".join(
["\"{}\"".format(id) for id in ftrack_ids_to_delete]
)
ftrack_ents_to_delete = self.session.query(
"select id, link from TypedContext where id in ({})".format(
joined_ids_to_delete
)
).all()
for entity in ftrack_ents_to_delete:
self.session.delete(entity)
try:
self.session.commit()
except Exception:
ent_path = "/".join(
[ent["name"] for ent in entity["link"]]
)
msg = "Failed to delete entity"
report_messages[msg].append(ent_path)
self.session.rollback()
self.log.warning(
"{} <{}>".format(msg, ent_path),
exc_info=True
)
if not_deleted_entities_id:
joined_not_deleted = ", ".join([
"\"{}\"".format(ftrack_id)
for ftrack_id in not_deleted_entities_id
])
joined_asset_names = ", ".join([
"\"{}\"".format(name)
for name in asset_names_to_delete
])
# Find assets of selected entities with names of checked subsets
assets = self.session.query((
"select id from Asset where"
" context_id in ({}) and name in ({})"
).format(joined_not_deleted, joined_asset_names)).all()
self.log.debug("{}Deleting Ftrack Assets <{}>".format(
ftrack_proc_txt,
", ".join([asset["id"] for asset in assets])
))
for asset in assets:
self.session.delete(asset)
try:
self.session.commit()
except Exception:
self.session.rollback()
msg = "Failed to delete asset"
report_messages[msg].append(asset["id"])
self.log.warning(
"{} <{}>".format(asset["id"]),
exc_info=True
)
return self.report_handle(report_messages, project_name, event)
def report_handle(self, report_messages, project_name, event):
if not report_messages:
return {
"success": True,
"message": "Deletion was successful!"
}
title = "Delete report ({}):".format(project_name)
items = []
items.append({
"type": "label",
"value": "# Deleting was not completely successful"
})
items.append({
"type": "label",
"value": "<p><i>Check logs for more information</i></p>"
})
for msg, _items in report_messages.items():
if not _items or not msg:
continue
items.append({
"type": "label",
"value": "# {}".format(msg)
})
if av_entity is not None:
all_ids.append(av_entity['_id'])
all_ids.extend(self.find_child(av_entity))
if isinstance(_items, str):
_items = [_items]
items.append({
"type": "label",
"value": '<p>{}</p>'.format("<br>".join(_items))
})
items.append(self.splitter)
session.delete(entity)
session.commit()
else:
subset_names = []
for key, value in self.values.items():
if key == 'delete_key' or value is False:
continue
entity_id = ObjectId(key)
av_entity = self.db.find_one({'_id': entity_id})
subset_names.append(av_entity['name'])
if av_entity is None:
continue
all_ids.append(entity_id)
all_ids.extend(self.find_child(av_entity))
for ft_asset in entity['assets']:
if ft_asset['name'] in subset_names:
session.delete(ft_asset)
session.commit()
if len(all_ids) == 0:
return {
'success': True,
'message': 'No entities to delete in avalon'
}
delete_query = {'_id': {'$in': all_ids}}
self.db.delete_many(delete_query)
self.show_interface(items, title, event)
return {
'success': True,
'message': 'All assets were deleted!'
"success": False,
"message": "Deleting finished. Read report messages."
}
def find_child(self, entity):
output = []
id = entity['_id']
visuals = [x for x in self.db.find({'data.visualParent': id})]
assert len(visuals) == 0, 'This asset has another asset as child'
childs = self.db.find({'parent': id})
for child in childs:
output.append(child['_id'])
output.extend(self.find_child(child))
return output
def find_assets(self, asset_names):
assets = []
for name in asset_names:
entity = self.db.find_one({
'type': 'asset',
'name': name
})
if entity is not None and entity not in assets:
assets.append(entity)
return assets
def register(session, plugins_presets={}):
    '''Register plugin. Called when used as a plugin.'''
DeleteAsset(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
DeleteAssetSubset(session, plugins_presets).register()
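
The dialog assembled by `interface` is a flat list of ftrack widget dictionaries; checkbox names are namespaced with the `asset:`/`subset:` prefixes so `confirm_delete` can split them apart again. A stripped-down illustration of the payload shape (the ids and names are placeholders):

items = [
    {"type": "hidden", "name": "action_id", "value": "9c0f..."},
    {"type": "label", "value": "# Delete asset:"},
    {"type": "boolean", "label": "sh010", "name": "asset:5e27...", "value": False},
    {"type": "label", "value": "---"},
    {"type": "label", "value": "# Subset to delete:"},
    {"type": "boolean", "label": "<b>modelMain</b>", "name": "subset:modelMain", "value": False},
]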

@@ -1,175 +0,0 @@
import os
import sys
import logging
import argparse
import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
class AssetsRemover(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
identifier = 'remove.assets'
#: Action label.
label = "Pype Admin"
variant = '- Delete Assets by Name'
#: Action description.
description = 'Removes assets from Ftrack and Avalon db with all childs'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
#: Db
db = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
if len(entities) != 1:
return False
valid = ["show", "task"]
entityType = event["data"]["selection"][0].get("entityType", "")
if entityType.lower() not in valid:
return False
return True
def interface(self, session, entities, event):
if not event['data'].get('values', {}):
title = 'Enter Asset names to delete'
items = []
for i in range(15):
item = {
'label': 'Asset {}'.format(i+1),
'name': 'asset_{}'.format(i+1),
'type': 'text',
'value': ''
}
items.append(item)
return {
'items': items,
'title': title
}
def launch(self, session, entities, event):
entity = entities[0]
if entity.entity_type.lower() != 'Project':
project = entity['project']
else:
project = entity
if 'values' not in event['data']:
return
values = event['data']['values']
if len(values) <= 0:
return {
'success': True,
'message': 'No Assets to delete!'
}
asset_names = []
for k, v in values.items():
if v.replace(' ', '') != '':
asset_names.append(v)
self.db.install()
self.db.Session['AVALON_PROJECT'] = project["full_name"]
assets = self.find_assets(asset_names)
all_ids = []
for asset in assets:
all_ids.append(asset['_id'])
all_ids.extend(self.find_child(asset))
if len(all_ids) == 0:
self.db.uninstall()
return {
'success': True,
'message': 'None of assets'
}
delete_query = {'_id': {'$in': all_ids}}
self.db.delete_many(delete_query)
self.db.uninstall()
return {
'success': True,
'message': 'All assets were deleted!'
}
def find_child(self, entity):
output = []
id = entity['_id']
visuals = [x for x in self.db.find({'data.visualParent': id})]
assert len(visuals) == 0, 'This asset has another asset as child'
childs = self.db.find({'parent': id})
for child in childs:
output.append(child['_id'])
output.extend(self.find_child(child))
return output
def find_assets(self, asset_names):
assets = []
for name in asset_names:
entity = self.db.find_one({
'type': 'asset',
'name': name
})
if entity is not None and entity not in assets:
assets.append(entity)
return assets
def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
AssetsRemover(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

@@ -0,0 +1,538 @@
import os
import copy
import shutil
import collections
import string
import clique
from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pypeapp import Anatomy
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import CustAttrIdKey
class Delivery(BaseAction):
    '''Deliver data to client action.'''
#: Action identifier.
identifier = "delivery.action"
#: Action label.
label = "Delivery"
#: Action description.
description = "Deliver data to client"
#: roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project manager"]
icon = '{}/ftrack/action_icons/Delivery.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
db_con = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def interface(self, session, entities, event):
if event["data"].get("values", {}):
return
title = "Delivery data to Client"
items = []
item_splitter = {"type": "label", "value": "---"}
# Prepare component names for processing
components = None
project = None
for entity in entities:
if project is None:
project_id = None
for ent_info in entity["link"]:
if ent_info["type"].lower() == "project":
project_id = ent_info["id"]
break
if project_id is None:
project = entity["asset"]["parent"]["project"]
else:
project = session.query((
"select id, full_name from Project where id is \"{}\""
).format(project_id)).one()
_components = set(
[component["name"] for component in entity["components"]]
)
if components is None:
components = _components
continue
components = components.intersection(_components)
if not components:
break
project_name = project["full_name"]
items.append({
"type": "hidden",
"name": "__project_name__",
"value": project_name
})
        # Prepare anatomy data
anatomy = Anatomy(project_name)
new_anatomies = []
first = None
for key in (anatomy.templates.get("delivery") or {}):
new_anatomies.append({
"label": key,
"value": key
})
if first is None:
first = key
skipped = False
        # Add message if there are no common components
if not components or not new_anatomies:
skipped = True
items.append({
"type": "label",
"value": "<h1>Something went wrong:</h1>"
})
items.append({
"type": "hidden",
"name": "__skipped__",
"value": skipped
})
if not components:
if len(entities) == 1:
items.append({
"type": "label",
"value": (
"- Selected entity doesn't have components to deliver."
)
})
else:
items.append({
"type": "label",
"value": (
"- Selected entities don't have common components."
)
})
# Add message if delivery anatomies are not set
if not new_anatomies:
items.append({
"type": "label",
"value": (
"- `\"delivery\"` anatomy key is not set in config."
)
})
# Skip if there are any data shortcomings
if skipped:
return {
"items": items,
"title": title
}
items.append({
"value": "<h1>Choose Components to deliver</h1>",
"type": "label"
})
for component in components:
items.append({
"type": "boolean",
"value": False,
"label": component,
"name": component
})
items.append(item_splitter)
items.append({
"value": "<h2>Location for delivery</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<i>NOTE: It is possible to replace `root` key in anatomy.</i>"
)
})
items.append({
"type": "text",
"name": "__location_path__",
"empty_text": "Type location path here...(Optional)"
})
items.append(item_splitter)
items.append({
"value": "<h2>Anatomy of delivery files</h2>",
"type": "label"
})
items.append({
"type": "label",
"value": (
"<p><i>NOTE: These can be set in Anatomy.yaml"
" within `delivery` key.</i></p>"
)
})
items.append({
"type": "enumerator",
"name": "__new_anatomies__",
"data": new_anatomies,
"value": first
})
return {
"items": items,
"title": title
}
def launch(self, session, entities, event):
if "values" not in event["data"]:
return
self.report_items = collections.defaultdict(list)
values = event["data"]["values"]
skipped = values.pop("__skipped__")
if skipped:
return None
component_names = []
location_path = values.pop("__location_path__")
anatomy_name = values.pop("__new_anatomies__")
project_name = values.pop("__project_name__")
for key, value in values.items():
if value is True:
component_names.append(key)
if not component_names:
return {
"success": True,
"message": "Not selected components to deliver."
}
location_path = location_path.strip()
if location_path:
location_path = os.path.normpath(location_path)
if not os.path.exists(location_path):
return {
"success": False,
"message": (
"Entered location path does not exists. \"{}\""
).format(location_path)
}
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
repres_to_deliver = []
for entity in entities:
asset = entity["asset"]
subset_name = asset["name"]
version = entity["version"]
parent = asset["parent"]
parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if parent_mongo_id:
parent_mongo_id = ObjectId(parent_mongo_id)
else:
asset_ent = self.db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
if not asset_ent:
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
msg = "Not synchronized entities to avalon"
self.report_items[msg].append(ent_path)
self.log.warning("{} <{}>".format(msg, ent_path))
continue
parent_mongo_id = asset_ent["_id"]
subset_ent = self.db_con.find_one({
"type": "subset",
"parent": parent_mongo_id,
"name": subset_name
})
version_ent = self.db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
repre_ents = self.db_con.find({
"type": "representation",
"parent": version_ent["_id"]
})
repres_by_name = {}
for repre in repre_ents:
repre_name = repre["name"]
repres_by_name[repre_name] = repre
for component in entity["components"]:
comp_name = component["name"]
if comp_name not in component_names:
continue
repre = repres_by_name.get(comp_name)
repres_to_deliver.append(repre)
if not location_path:
location_path = os.environ.get("AVALON_PROJECTS") or ""
        self.log.debug("Delivery root path: {}".format(location_path))
anatomy = Anatomy(project_name)
for repre in repres_to_deliver:
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data["root"] = location_path
anatomy_filled = anatomy.format(anatomy_data)
test_path = (
anatomy_filled
.get("delivery", {})
.get(anatomy_name)
)
if not test_path:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(anatomy_name)
all_anatomies = anatomy.format_all(anatomy_data)
result = None
for anatomies in all_anatomies.values():
for key, temp in anatomies.get("delivery", {}).items():
if key != anatomy_name:
continue
result = temp
break
# TODO log error! - missing keys in anatomy
if result:
missing_keys = [
key[1] for key in string.Formatter().parse(result)
if key[1] is not None
]
else:
missing_keys = ["unknown"]
keys = ", ".join(missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(str(repre["_id"]), keys)
self.report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(result)
)
)
continue
# Get source repre path
frame = repre['context'].get('frame')
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_represenation(repre)
            # TODO add backup solution where root of path from component
            # is replaced with AVALON_PROJECTS root
if not frame:
self.process_single_file(
repre_path, anatomy, anatomy_name, anatomy_data
)
else:
self.process_sequence(
repre_path, anatomy, anatomy_name, anatomy_data
)
self.db_con.uninstall()
return self.report()
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data
):
dir_path, file_name = os.path.split(str(repre_path))
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
msg = "Source file was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
            # Skip if the collection doesn't share the same basename
if not col.head.startswith(file_name_items[0]):
continue
src_collection = col
break
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
self.report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
delivery_path = anatomy_filled["delivery"][anatomy_name]
        self.log.debug("Delivery path: {}".format(delivery_path))
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
self.copy_file(src, dst)
def path_from_represenation(self, representation):
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = os.environ.get("AVALON_PROJECTS") or ""
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(self, src_path, dst_path):
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
def report(self):
items = []
title = "Delivery report"
for msg, _items in self.report_items.items():
if not _items:
continue
if items:
items.append({"type": "label", "value": "---"})
items.append({
"type": "label",
"value": "# {}".format(msg)
})
if not isinstance(_items, (list, tuple)):
_items = [_items]
__items = []
for item in _items:
__items.append(str(item))
items.append({
"type": "label",
"value": '<p>{}</p>'.format("<br>".join(__items))
})
if not items:
return {
"success": True,
"message": "Delivery Finished"
}
return {
"items": items,
"title": title,
"success": False,
"message": "Delivery Finished"
}
def register(session, plugins_presets={}):
    '''Register plugin. Called when used as a plugin.'''
Delivery(session, plugins_presets).register()
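
The sequence branch above relies on `clique` to recover frame collections from the files on disk and to rebuild destination names with matching padding. A small standalone illustration of the calls used (file names are made up):

import clique

files = ["beauty.0997.exr", "beauty.0998.exr", "beauty.0999.exr"]
collections, remainder = clique.assemble(files)
col = collections[0]

print(col.head, col.tail, col.padding)  # "beauty." ".exr" 4
print(col.format("{padding}") % 998)    # "0998", the same trick as the copy loop
for index in col.indexes:
    print("{}{}{}".format(col.head, col.format("{padding}") % index, col.tail))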

@@ -9,7 +9,7 @@ class SeedDebugProject(BaseAction):
#: Action identifier.
identifier = "seed.debug.project"
#: Action label.
label = "SeedDebugProject"
label = "Seed Debug Project"
#: Action description.
description = "Description"
#: priority
@@ -265,6 +265,15 @@ class SeedDebugProject(BaseAction):
def create_assets(self, project, asset_count):
self.log.debug("*** Creating assets:")
try:
asset_count = int(asset_count)
except ValueError:
asset_count = 0
if asset_count <= 0:
self.log.debug("No assets to create")
return
main_entity = self.session.create("Folder", {
"name": "Assets",
"parent": project
@@ -305,6 +314,31 @@ class SeedDebugProject(BaseAction):
def create_shots(self, project, seq_count, shots_count):
self.log.debug("*** Creating shots:")
# Convert counts to integers
try:
seq_count = int(seq_count)
except ValueError:
seq_count = 0
try:
shots_count = int(shots_count)
except ValueError:
shots_count = 0
# Check if both are higher than 0
missing = []
if seq_count <= 0:
missing.append("sequences")
if shots_count <= 0:
missing.append("shots")
if missing:
self.log.debug("No {} to create".format(" and ".join(missing)))
return
# Create Folder "Shots"
main_entity = self.session.create("Folder", {
"name": "Shots",
"parent": project

@@ -70,7 +70,10 @@ class SyncToAvalonLocal(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()

@@ -105,7 +105,10 @@ class SyncToAvalonServer(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
self.entities_factory.launch_setup(ft_project_name)
output = self.entities_factory.launch_setup(ft_project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()

@@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent):
ignore_entTypes = [
"socialfeed", "socialnotification", "note",
"assetversion", "job", "user", "reviewsessionobject", "timer",
"timelog", "auth_userrole"
"timelog", "auth_userrole", "appointment"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid"]
@@ -131,7 +131,9 @@ class SyncToAvalonEvent(BaseEvent):
ftrack_id = proj["data"]["ftrackId"]
self._avalon_ents_by_ftrack_id[ftrack_id] = proj
for ent in ents:
ftrack_id = ent["data"]["ftrackId"]
ftrack_id = ent["data"].get("ftrackId")
if ftrack_id is None:
continue
self._avalon_ents_by_ftrack_id[ftrack_id] = ent
return self._avalon_ents_by_ftrack_id
@@ -1427,6 +1429,93 @@ class SyncToAvalonEvent(BaseEvent):
parent_id = ent_info["parentId"]
new_tasks_by_parent[parent_id].append(ent_info)
pop_out_ents.append(ftrack_id)
continue
name = (
ent_info
.get("changes", {})
.get("name", {})
.get("new")
)
            avalon_ent_by_name = self.avalon_ents_by_name.get(name)
            avalon_ent_by_name_ftrack_id = None
            if avalon_ent_by_name:
                avalon_ent_by_name_ftrack_id = (
                    avalon_ent_by_name
                    .get("data", {})
                    .get("ftrackId")
                )
            if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None:
ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
if not ftrack_ent:
ftrack_ent = self.process_session.query(
self.entities_query_by_id.format(
self.cur_project["id"], ftrack_id
)
).one()
self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
                parents = ent_path_items[1:-1]
avalon_ent_parents = (
avalon_ent_by_name.get("data", {}).get("parents")
)
if parents == avalon_ent_parents:
self.dbcon.update_one({
"_id": avalon_ent_by_name["_id"]
}, {
"$set": {
"data.ftrackId": ftrack_id,
"data.entityType": entity_type
}
})
avalon_ent_by_name["data"]["ftrackId"] = ftrack_id
avalon_ent_by_name["data"]["entityType"] = entity_type
self._avalon_ents_by_ftrack_id[ftrack_id] = (
avalon_ent_by_name
)
if self._avalon_ents_by_parent_id:
found = None
for _parent_id_, _entities_ in (
self._avalon_ents_by_parent_id.items()
):
for _idx_, entity in enumerate(_entities_):
if entity["_id"] == avalon_ent_by_name["_id"]:
found = (_parent_id_, _idx_)
break
if found:
break
if found:
_parent_id_, _idx_ = found
self._avalon_ents_by_parent_id[_parent_id_][
_idx_] = avalon_ent_by_name
if self._avalon_ents_by_id:
self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = (
avalon_ent_by_name
)
if self._avalon_ents_by_name:
self._avalon_ents_by_name[name] = avalon_ent_by_name
if self._avalon_ents:
found = None
project, entities = self._avalon_ents
for _idx_, _ent_ in enumerate(entities):
if _ent_["_id"] != avalon_ent_by_name["_id"]:
continue
found = _idx_
break
if found is not None:
entities[found] = avalon_ent_by_name
self._avalon_ents = project, entities
pop_out_ents.append(ftrack_id)
continue
configuration_id = entity_type_conf_ids.get(entity_type)
if not configuration_id:
@@ -1438,9 +1527,11 @@ class SyncToAvalonEvent(BaseEvent):
if attr["entity_type"] != ent_info["entityType"]:
continue
if ent_info["entityType"] != "show":
if attr["object_type_id"] != ent_info["objectTypeId"]:
continue
if (
ent_info["entityType"] == "task" and
attr["object_type_id"] != ent_info["objectTypeId"]
):
continue
configuration_id = attr["id"]
entity_type_conf_ids[entity_type] = configuration_id
@@ -1712,7 +1803,8 @@ class SyncToAvalonEvent(BaseEvent):
if ca_ent_type == "show":
cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
cust_attrs_by_obj_id[obj_id][key] = cust_attr

@@ -4,6 +4,7 @@ from pypeapp import config
class VersionToTaskStatus(BaseEvent):
# Presets usage
default_status_mapping = {}
def launch(self, session, event):
@@ -11,69 +12,123 @@ class VersionToTaskStatus(BaseEvent):
# start of event procedure ----------------------------------
for entity in event['data'].get('entities', []):
# Filter non-assetversions
if (
entity['entityType'] == 'assetversion' and
'statusid' in (entity.get('keys') or [])
):
# Filter AssetVersions
if entity["entityType"] != "assetversion":
continue
version = session.get('AssetVersion', entity['entityId'])
try:
version_status = session.get(
'Status', entity['changes']['statusid']['new']
)
except Exception:
# Skip if statusid not in keys (in changes)
keys = entity.get("keys")
if not keys or "statusid" not in keys:
continue
            # Get the new status id from the event changes
version_status_id = (
entity
.get("changes", {})
.get("statusid", {})
.get("new", {})
)
# Just check that `new` is set to any value
if not version_status_id:
continue
            version_status = None
            try:
                version_status = session.get("Status", version_status_id)
            except Exception:
                self.log.warning(
                    "Failed to query status with id [ {} ]".format(
                        version_status_id
                    ),
                    exc_info=True
                )
            if not version_status:
                continue
version_status_orig = version_status["name"]
# Load status mapping from presets
status_mapping = (
config.get_presets()
.get("ftrack", {})
.get("ftrack_config", {})
.get("status_version_to_task")
) or self.default_status_mapping
# Skip if mapping is empty
if not status_mapping:
continue
            # Lower the version status name and check if it has a mapping
version_status = version_status_orig.lower()
new_status_names = []
mapped = status_mapping.get(version_status)
if mapped:
new_status_names.extend(list(mapped))
new_status_names.append(version_status)
self.log.debug(
"Processing AssetVersion status change: [ {} ]".format(
version_status_orig
)
)
# Lower all names from presets
new_status_names = [name.lower() for name in new_status_names]
# Get entities necessary for processing
version = session.get("AssetVersion", entity["entityId"])
task = version.get("task")
if not task:
continue
project_schema = task["project"]["project_schema"]
# Get all available statuses for Task
statuses = project_schema.get_statuses("Task", task["type_id"])
            # Map lowered status names to their objects
stat_names_low = {
status["name"].lower(): status for status in statuses
}
new_status = None
for status_name in new_status_names:
if status_name not in stat_names_low:
continue
task_status = version_status
task = version['task']
self.log.info('>>> version status: [ {} ]'.format(
version_status['name']))
version_name_low = version_status['name'].lower()
# store object of found status
new_status = stat_names_low[status_name]
self.log.debug("Status to set: [ {} ]".format(
new_status["name"]
))
break
status_mapping = (
config.get_presets()
.get("ftrack", {})
.get("ftrack_config", {})
.get("status_version_to_task")
) or self.default_status_mapping
            # Skip if no mapped status name exists for this entity
if not new_status:
self.log.warning(
"Any of statuses from presets can be set: {}".format(
str(new_status_names)
)
)
continue
status_to_set = status_mapping.get(version_name_low)
# Get full path to task for logging
ent_path = "/".join([ent["name"] for ent in task["link"]])
self.log.info(
'>>> status to set: [ {} ]'.format(status_to_set))
if status_to_set is not None:
query = 'Status where name is "{}"'.format(status_to_set)
try:
task_status = session.query(query).one()
except Exception:
self.log.info(
'!!! status was not found in Ftrack [ {} ]'.format(
status_to_set
)
)
continue
# Proceed if the task status was set
if task_status is not None:
# Get path to task
path = task['name']
for p in task['ancestors']:
path = p['name'] + '/' + path
# Setting task status
try:
task['status'] = task_status
session.commit()
except Exception as e:
session.rollback()
self.log.warning('!!! [ {} ] status couldnt be set:\
[ {} ]'.format(path, e))
session.rollback()
else:
self.log.info('>>> [ {} ] updated to [ {} ]'.format(
path, task_status['name']))
# Setting task status
try:
task["status"] = new_status
session.commit()
self.log.debug("[ {} ] Status updated to [ {} ]".format(
ent_path, new_status['name']
))
except Exception:
session.rollback()
self.log.warning(
"[ {} ]Status couldn't be set".format(ent_path),
exc_info=True
)
def register(session, plugins_presets):
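
The preset consumed above maps a lowered AssetVersion status name to a list of candidate Task status names, tried in order; the version status itself is appended as a last-resort candidate. A hypothetical preset showing the expected shape (the status names are examples only):

status_version_to_task = {
    "reviewed": ["change requested", "retake"],
    "approved": ["approved", "done"]
}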

@@ -1,10 +1,32 @@
import os
import sys
import logging
import getpass
import atexit
import tempfile
import threading
import datetime
import time
import queue
import pymongo
import requests
import ftrack_api
import ftrack_api.session
import ftrack_api.cache
import ftrack_api.operation
import ftrack_api._centralized_storage_scenario
import ftrack_api.event
from ftrack_api.logging import LazyLogMessage as L
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
from pypeapp import Logger
from pype.ftrack.lib.custom_db_connector import DbConnector
def ftrack_events_mongo_settings():
host = None
@@ -49,7 +71,9 @@ def ftrack_events_mongo_settings():
def get_ftrack_event_mongo_info():
host, port, database, username, password, collection, auth_db = ftrack_events_mongo_settings()
host, port, database, username, password, collection, auth_db = (
ftrack_events_mongo_settings()
)
user_pass = ""
if username and password:
user_pass = "{}:{}@".format(username, password)
@@ -97,3 +121,334 @@ def check_ftrack_url(url, log_errors=True):
print('DEBUG: Ftrack server {} is accessible.'.format(url))
return url
class StorerEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(StorerEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"storer")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(StorerEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class ProcessEventHub(ftrack_api.event.hub.EventHub):
url, database, table_name = get_ftrack_event_mongo_info()
is_table_created = False
pypelog = Logger().get_logger("Session Processor")
def __init__(self, *args, **kwargs):
self.dbcon = DbConnector(
mongo_url=self.url,
database_name=self.database,
table_name=self.table_name
)
self.sock = kwargs.pop("sock")
super(ProcessEventHub, self).__init__(*args, **kwargs)
def prepare_dbcon(self):
try:
self.dbcon.install()
self.dbcon._database.list_collection_names()
except pymongo.errors.AutoReconnect:
self.pypelog.error(
"Mongo server \"{}\" is not responding, exiting.".format(
os.environ["AVALON_MONGO"]
)
)
sys.exit(0)
except pymongo.errors.OperationFailure:
self.pypelog.error((
"Error with Mongo access, probably permissions."
"Check if exist database with name \"{}\""
" and collection \"{}\" inside."
).format(self.database, self.table_name))
self.sock.sendall(b"MongoError")
sys.exit(0)
def wait(self, duration=None):
"""Overriden wait
Event are loaded from Mongo DB when queue is empty. Handled event is
set as processed in Mongo DB.
"""
started = time.time()
self.prepare_dbcon()
while True:
try:
event = self._event_queue.get(timeout=0.1)
except queue.Empty:
if not self.load_events():
time.sleep(0.5)
else:
try:
self._handle(event)
self.dbcon.update_one(
{"id": event["id"]},
{"$set": {"pype_data.is_processed": True}}
)
except pymongo.errors.AutoReconnect:
self.pypelog.error((
"Mongo server \"{}\" is not responding, exiting."
).format(os.environ["AVALON_MONGO"]))
sys.exit(0)
# Additional special processing of events.
if event['topic'] == 'ftrack.meta.disconnected':
break
if duration is not None:
if (time.time() - started) > duration:
break
def load_events(self):
"""Load not processed events sorted by stored date"""
        # Delete events processed more than 3 days ago
        ago_date = datetime.datetime.now() - datetime.timedelta(days=3)
result = self.dbcon.delete_many({
"pype_data.stored": {"$lte": ago_date},
"pype_data.is_processed": True
})
not_processed_events = self.dbcon.find(
{"pype_data.is_processed": False}
).sort(
[("pype_data.stored", pymongo.ASCENDING)]
)
found = False
for event_data in not_processed_events:
new_event_data = {
k: v for k, v in event_data.items()
if k not in ["_id", "pype_data"]
}
try:
event = ftrack_api.event.base.Event(**new_event_data)
except Exception:
self.logger.exception(L(
'Failed to convert payload into event: {0}',
event_data
))
continue
found = True
self._event_queue.put(event)
return found
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which skip events and extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "event":
return
if code_name == "heartbeat":
self.sock.sendall(b"processor")
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
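
The documents that `load_events` consumes are plain ftrack event payloads wrapped with a `pype_data` envelope by the storer process. A rough sketch of one stored record (only the fields queried above are certain; the rest is illustrative):

import datetime
from bson.objectid import ObjectId

stored_event = {
    "_id": ObjectId("5e283c1b0000000000000000"),
    "id": "8c90...",  # ftrack event id, used for the is_processed update
    "topic": "ftrack.update",
    "data": {"entities": []},
    "pype_data": {
        "stored": datetime.datetime(2020, 1, 22, 11, 0),
        "is_processed": False
    }
}
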
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None, sock=None, Eventhub=None
):
super(ftrack_api.session.Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = ftrack_api.session.SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
if Eventhub is None:
Eventhub = ftrack_api.event.hub.EventHub
self._event_hub = Eventhub(
self._server_url,
self._api_user,
self._api_key,
sock=sock
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
# Register to auto-close session on exit.
atexit.register(self.close)
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
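
# Design note: unlike the removed ProcessSession/StorerSession further
# below, SocketSession receives the hub class through the `Eventhub`
# argument, so a single session class serves all three subprocess
# servers:
#
#     session = SocketSession(
#         auto_connect_event_hub=True, sock=sock, Eventhub=StorerEventHub
#     )
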

View file

@@ -1,292 +0,0 @@
import logging
import os
import atexit
import datetime
import tempfile
import threading
import time
import requests
import queue
import pymongo
import ftrack_api
import ftrack_api.session
import ftrack_api.cache
import ftrack_api.operation
import ftrack_api._centralized_storage_scenario
import ftrack_api.event
from ftrack_api.logging import LazyLogMessage as L
from pype.ftrack.lib.custom_db_connector import DbConnector
from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info
from pypeapp import Logger
log = Logger().get_logger("Session processor")
class ProcessEventHub(ftrack_api.event.hub.EventHub):
url, database, table_name = get_ftrack_event_mongo_info()
is_table_created = False
def __init__(self, *args, **kwargs):
self.dbcon = DbConnector(
mongo_url=self.url,
database_name=self.database,
table_name=self.table_name
)
self.sock = kwargs.pop("sock")
super(ProcessEventHub, self).__init__(*args, **kwargs)
def prepare_dbcon(self):
try:
self.dbcon.install()
self.dbcon._database.list_collection_names()
except pymongo.errors.AutoReconnect:
log.error("Mongo server \"{}\" is not responding, exiting.".format(
os.environ["AVALON_MONGO"]
))
sys.exit(0)
except pymongo.errors.OperationFailure:
log.error((
"Error with Mongo access, probably permissions."
"Check if exist database with name \"{}\""
" and collection \"{}\" inside."
).format(self.database, self.table_name))
self.sock.sendall(b"MongoError")
sys.exit(0)
def wait(self, duration=None):
"""Overriden wait
Event are loaded from Mongo DB when queue is empty. Handled event is
set as processed in Mongo DB.
"""
started = time.time()
self.prepare_dbcon()
while True:
try:
event = self._event_queue.get(timeout=0.1)
except queue.Empty:
if not self.load_events():
time.sleep(0.5)
else:
try:
self._handle(event)
self.dbcon.update_one(
{"id": event["id"]},
{"$set": {"pype_data.is_processed": True}}
)
except pymongo.errors.AutoReconnect:
log.error((
"Mongo server \"{}\" is not responding, exiting."
).format(os.environ["AVALON_MONGO"]))
sys.exit(0)
# Additional special processing of events.
if event['topic'] == 'ftrack.meta.disconnected':
break
if duration is not None:
if (time.time() - started) > duration:
break
def load_events(self):
"""Load not processed events sorted by stored date"""
ago_date = datetime.datetime.now() - datetime.timedelta(days=3)
result = self.dbcon.delete_many({
"pype_data.stored": {"$lte": ago_date},
"pype_data.is_processed": True
})
not_processed_events = self.dbcon.find(
{"pype_data.is_processed": False}
).sort(
[("pype_data.stored", pymongo.ASCENDING)]
)
found = False
for event_data in not_processed_events:
new_event_data = {
k: v for k, v in event_data.items()
if k not in ["_id", "pype_data"]
}
try:
event = ftrack_api.event.base.Event(**new_event_data)
except Exception:
self.logger.exception(L(
'Failed to convert payload into event: {0}',
event_data
))
continue
found = True
self._event_queue.put(event)
return found
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which skip events and extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "event":
return
if code_name == "heartbeat":
self.sock.sendall(b"processor")
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class ProcessSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None, sock=None
):
super(ftrack_api.session.Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = ftrack_api.session.SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ProcessEventHub(
self._server_url,
self._api_user,
self._api_key,
sock=sock
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
# Register to auto-close session on exit.
atexit.register(self.close)
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)

View file

@@ -1,269 +0,0 @@
import logging
import os
import atexit
import tempfile
import threading
import requests
import ftrack_api
import ftrack_api.session
import ftrack_api.cache
import ftrack_api.operation
import ftrack_api._centralized_storage_scenario
import ftrack_api.event
from ftrack_api.logging import LazyLogMessage as L
class StorerEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(StorerEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"storer")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(StorerEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StorerSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None, sock=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(ftrack_api.session.Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = ftrack_api.session.SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = StorerEventHub(
self._server_url,
self._api_user,
self._api_key,
sock=sock
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
# Register to auto-close session on exit.
atexit.register(self.close)
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)

View file

@@ -1,7 +1,5 @@
import os
import sys
import time
import signal
import socket
import threading
import subprocess
@@ -10,7 +8,9 @@ from pypeapp import Logger
class SocketThread(threading.Thread):
"""Thread that checks suprocess of storer of processor of events"""
MAX_TIMEOUT = 35
def __init__(self, name, port, filepath):
super(SocketThread, self).__init__()
self.log = Logger().get_logger("SocketThread", "Event Thread")
@@ -26,6 +26,8 @@ class SocketThread(threading.Thread):
self.mongo_error = False
self._temp_data = {}
def stop(self):
self._is_running = False
@@ -81,8 +83,9 @@ class SocketThread(threading.Thread):
try:
if not self._is_running:
break
data = None
try:
data = connection.recv(16)
data = self.get_data_from_con(connection)
time_con = time.time()
except socket.timeout:
@@ -99,10 +102,7 @@ class SocketThread(threading.Thread):
self._is_running = False
break
if data:
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
self._handle_data(connection, data)
except Exception as exc:
self.log.error(
@@ -121,3 +121,14 @@ class SocketThread(threading.Thread):
for line in lines:
os.write(1, line)
self.finished = True
def get_data_from_con(self, connection):
return connection.recv(16)
def _handle_data(self, connection, data):
if not data:
return
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
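
# A sketch of how the two hooks above can be specialised; the subclass
# is hypothetical and only illustrates the extension points:
#
#     class StatusSocketThread(SocketThread):
#         def get_data_from_con(self, connection):
#             # accept larger payloads than the 16 byte default
#             return connection.recv(1024)
#
#         def _handle_data(self, connection, data):
#             self._temp_data["last_payload"] = data
#             super(StatusSocketThread, self)._handle_data(
#                 connection, data
#             )
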

View file

@@ -1,12 +1,9 @@
import os
import sys
import datetime
import signal
import socket
import pymongo
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.session_processor import ProcessSession
from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pypeapp import Logger
log = Logger().get_logger("Event processor")
@@ -24,12 +21,14 @@ def main(args):
sock.sendall(b"CreatedProcess")
try:
session = ProcessSession(auto_connect_event_hub=True, sock=sock)
server = FtrackServer('event')
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
)
server = FtrackServer("event")
log.debug("Launched Ftrack Event processor")
server.run_server(session)
except Exception as exc:
except Exception:
log.error("Event server crashed. See traceback below", exc_info=True)
finally:

View file

@@ -7,22 +7,22 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info
from pype.ftrack.ftrack_server.lib import (
get_ftrack_event_mongo_info,
SocketSession,
StorerEventHub
)
from pype.ftrack.lib.custom_db_connector import DbConnector
from session_storer import StorerSession
from pypeapp import Logger
log = Logger().get_logger("Event storer")
class SessionFactory:
session = None
url, database, table_name = get_ftrack_event_mongo_info()
class SessionClass:
def __init__(self):
self.session = None
session_obj = SessionClass()
dbcon = DbConnector(
mongo_url=url,
database_name=database,
@@ -75,7 +75,11 @@ def launch(event):
def trigger_sync(event):
session = session_obj.session
session = SessionFactory.session
source_id = event.get("source", {}).get("id")
if not source_id or source_id != session.event_hub.id:
return
if session is None:
log.warning("Session is not set. Can't trigger Sync to avalon action.")
return True
@@ -93,7 +97,7 @@ def trigger_sync(event):
"$set": {"pype_data.is_processed": True}
}
dbcon.update_many(query, set_dict)
selections = []
for project in projects:
if project["status"] != "active":
@@ -154,8 +158,10 @@ def main(args):
sock.sendall(b"CreatedStore")
try:
session = StorerSession(auto_connect_event_hub=True, sock=sock)
session_obj.session = session
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=StorerEventHub
)
SessionFactory.session = session
register(session)
server = FtrackServer("event")
log.debug("Launched Ftrack Event storer")

View file

@@ -1,4 +1,3 @@
import os
import sys
import time
import datetime
@@ -7,7 +6,6 @@ import threading
from ftrack_server import FtrackServer
import ftrack_api
from ftrack_api.event.hub import EventHub
from pypeapp import Logger
log = Logger().get_logger("Event Server Legacy")
@@ -37,7 +35,10 @@ class TimerChecker(threading.Thread):
if not self.session.event_hub.connected:
if not connected:
if (datetime.datetime.now() - start).seconds > self.max_time_out:
if (
(datetime.datetime.now() - start).seconds >
self.max_time_out
):
log.error((
"Exiting event server. Session was not connected"
" to ftrack server in {} seconds."
@@ -61,7 +62,7 @@ class TimerChecker(threading.Thread):
def main(args):
check_thread = None
try:
server = FtrackServer('event')
server = FtrackServer("event")
session = ftrack_api.Session(auto_connect_event_hub=True)
check_thread = TimerChecker(server, session)

View file

@@ -0,0 +1,51 @@
import sys
import signal
import socket
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pypeapp import Logger
log = Logger().get_logger(__name__)
def main(args):
port = int(args[-1])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug("Storer connected to {} port {}".format(*server_address))
sock.connect(server_address)
sock.sendall(b"CreatedUser")
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
)
server = FtrackServer("action")
log.debug("Launched Ftrack Event storer")
server.run_server(session=session)
finally:
log.debug("Closing socket")
sock.close()
return 1
if __name__ == "__main__":
    # Register interrupt signal
def signal_handler(sig, frame):
        log.info("Process was forced to stop. Process ended.")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
sys.exit(main(sys.argv))

View file

@@ -314,6 +314,9 @@ class SyncEntitiesFactory:
self.log.warning(msg)
return {"success": False, "message": msg}
self.log.debug((
"*** Synchronization initialization started <{}>."
).format(project_full_name))
# Check if `avalon_mongo_id` custom attribute exist or is accessible
if CustAttrIdKey not in ft_project["custom_attributes"]:
items = []
@@ -699,7 +702,7 @@ class SyncEntitiesFactory:
if ca_ent_type == "show":
avalon_attrs[ca_ent_type][key] = cust_attr["default"]
avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"]
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
avalon_attrs[obj_id][key] = cust_attr["default"]
avalon_attrs_ca_id[obj_id][key] = cust_attr["id"]
@@ -708,7 +711,7 @@ class SyncEntitiesFactory:
if ca_ent_type == "show":
attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"]
else:
elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
attrs_per_entity_type[obj_id][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"]

View file

@@ -2,7 +2,7 @@ import functools
import time
from pypeapp import Logger
import ftrack_api
from pype.ftrack.ftrack_server import session_processor
from pype.ftrack.ftrack_server.lib import SocketSession
class MissingPermision(Exception):
@@ -41,7 +41,7 @@ class BaseHandler(object):
self.log = Logger().get_logger(self.__class__.__name__)
if not(
isinstance(session, ftrack_api.session.Session) or
isinstance(session, session_processor.ProcessSession)
isinstance(session, SocketSession)
):
raise Exception((
"Session object entered with args is instance of \"{}\""

View file

@@ -1,26 +1,27 @@
import os
import json
import threading
import time
from Qt import QtCore, QtGui, QtWidgets
import datetime
import threading
from Qt import QtCore, QtWidgets
import ftrack_api
from pypeapp import style
from pype.ftrack import FtrackServer, check_ftrack_url, credentials
from ..ftrack_server.lib import check_ftrack_url
from ..ftrack_server import socket_thread
from ..lib import credentials
from . import login_dialog
from pype import api as pype
from pypeapp import Logger
log = pype.Logger().get_logger("FtrackModule", "ftrack")
log = Logger().get_logger("FtrackModule", "ftrack")
class FtrackModule:
def __init__(self, main_parent=None, parent=None):
self.parent = parent
self.widget_login = login_dialog.Login_Dialog_ui(self)
self.action_server = FtrackServer('action')
self.thread_action_server = None
self.thread_socket_server = None
self.thread_timer = None
self.bool_logged = False
@@ -75,14 +76,6 @@ class FtrackModule:
# Actions part
def start_action_server(self):
self.bool_action_thread_running = True
self.set_menu_visibility()
if (
self.thread_action_server is not None and
self.bool_action_thread_running is False
):
self.stop_action_server()
if self.thread_action_server is None:
self.thread_action_server = threading.Thread(
target=self.set_action_server
@@ -90,35 +83,114 @@ class FtrackModule:
self.thread_action_server.start()
def set_action_server(self):
first_check = True
while self.bool_action_thread_running is True:
if not check_ftrack_url(os.environ['FTRACK_SERVER']):
if first_check:
log.warning(
"Could not connect to Ftrack server"
)
first_check = False
if self.bool_action_server_running:
return
self.bool_action_server_running = True
self.bool_action_thread_running = False
ftrack_url = os.environ['FTRACK_SERVER']
parent_file_path = os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
min_fail_seconds = 5
max_fail_count = 3
wait_time_after_max_fail = 10
# Threads data
thread_name = "ActionServerThread"
thread_port = 10021
subprocess_path = (
"{}/ftrack_server/sub_user_server.py".format(parent_file_path)
)
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
last_failed = datetime.datetime.now()
failed_count = 0
ftrack_accessible = False
printed_ftrack_error = False
# Main loop
while True:
if not self.bool_action_server_running:
log.debug("Action server was pushed to stop.")
break
# Check if accessible Ftrack and Mongo url
if not ftrack_accessible:
ftrack_accessible = check_ftrack_url(ftrack_url)
# Run threads only if Ftrack is accessible
if not ftrack_accessible:
if not printed_ftrack_error:
log.warning("Can't access Ftrack {}".format(ftrack_url))
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
self.bool_action_thread_running = False
self.set_menu_visibility()
printed_ftrack_error = True
time.sleep(1)
continue
log.info(
"Connected to Ftrack server. Running actions session"
)
try:
self.bool_action_server_running = True
printed_ftrack_error = False
                # Run backup thread which does not require mongo to work
if self.thread_socket_server is None:
if failed_count < max_fail_count:
self.thread_socket_server = socket_thread.SocketThread(
thread_name, thread_port, subprocess_path
)
self.thread_socket_server.start()
self.bool_action_thread_running = True
self.set_menu_visibility()
elif failed_count == max_fail_count:
log.warning((
"Action server failed {} times."
" I'll try to run again {}s later"
).format(
str(max_fail_count), str(wait_time_after_max_fail))
)
failed_count += 1
elif ((
datetime.datetime.now() - last_failed
).seconds > wait_time_after_max_fail):
failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not self.thread_socket_server.isAlive():
                    self.thread_socket_server.join()
self.thread_socket_server = None
ftrack_accessible = False
self.bool_action_thread_running = False
self.set_menu_visibility()
self.action_server.run_server()
if self.bool_action_thread_running:
log.debug("Ftrack action server has stopped")
except Exception:
log.warning(
"Ftrack Action server crashed. Trying to connect again",
exc_info=True
)
self.bool_action_server_running = False
self.set_menu_visibility()
first_check = True
_last_failed = datetime.datetime.now()
delta_time = (_last_failed - last_failed).seconds
if delta_time < min_fail_seconds:
failed_count += 1
else:
failed_count = 0
last_failed = _last_failed
time.sleep(1)
self.bool_action_thread_running = False
self.bool_action_server_running = False
self.set_menu_visibility()
def reset_action_server(self):
self.stop_action_server()
@@ -126,16 +198,18 @@ class FtrackModule:
def stop_action_server(self):
try:
self.bool_action_thread_running = False
self.action_server.stop_session()
self.bool_action_server_running = False
if self.thread_socket_server is not None:
self.thread_socket_server.stop()
self.thread_socket_server.join()
self.thread_socket_server = None
if self.thread_action_server is not None:
self.thread_action_server.join()
self.thread_action_server = None
log.info("Ftrack action server was forced to stop")
self.bool_action_server_running = False
self.set_menu_visibility()
except Exception:
log.warning(
"Error has happened during Killing action server",
@@ -201,9 +275,9 @@ class FtrackModule:
self.stop_timer_thread()
return
self.aRunActionS.setVisible(not self.bool_action_thread_running)
self.aRunActionS.setVisible(not self.bool_action_server_running)
self.aResetActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_server_running)
if self.bool_timer_event is False:
self.start_timer_thread()

View file

@@ -14,24 +14,38 @@ log = logging.getLogger(__name__)
# Special naming case for subprocess since it's a built-in module.
def _subprocess(args):
def _subprocess(*args, **kwargs):
"""Convenience method for getting output errors for subprocess."""
# make sure environment contains only strings
env = {k: str(v) for k, v in os.environ.items()}
if not kwargs.get("env"):
filtered_env = {k: str(v) for k, v in os.environ.items()}
else:
filtered_env = {k: str(v) for k, v in kwargs.get("env").items()}
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
env=env
)
# set overrides
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
kwargs['env'] = filtered_env
output = proc.communicate()[0]
proc = subprocess.Popen(*args, **kwargs)
output, error = proc.communicate()
if output:
output = output.decode("utf-8")
output += "\n"
for line in output.strip().split("\n"):
log.info(line)
if error:
error = error.decode("utf-8")
error += "\n"
for line in error.strip().split("\n"):
log.error(line)
if proc.returncode != 0:
log.error(output)
raise ValueError("\"{}\" was not successful: {}".format(args, output))
return output
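
# Usage sketch for the reworked helper; the ffmpeg calls are examples
# only and assume the binary is on PATH (`src`/`dst` stand in for real
# file paths):
#
#     _subprocess(["ffmpeg", "-version"])
#     _subprocess(["ffmpeg", "-i", src, dst], env=dict(os.environ))
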
@@ -182,9 +196,13 @@ def any_outdated():
if representation in checked:
continue
representation_doc = io.find_one({"_id": io.ObjectId(representation),
"type": "representation"},
projection={"parent": True})
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
@@ -294,27 +312,38 @@ def switch_item(container,
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
asset = io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
subset = io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[('name', -1)]
)
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
representation = io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
@@ -352,7 +381,10 @@ def get_asset(asset_name=None):
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
asset_document = io.find_one({
"name": asset_name,
"type": "asset"
})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
@@ -524,8 +556,7 @@ def get_subsets(asset_name,
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
asset_io = io.find_one({"type": "asset", "name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
@@ -549,14 +580,20 @@ def get_subsets(asset_name,
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version_sel = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
version_sel = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": int(version)
})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}

View file

@@ -6,6 +6,7 @@ from collections import OrderedDict
from avalon import api, io, lib
import avalon.nuke
from avalon.nuke import lib as anlib
import pype.api as pype
import nuke
@@ -706,9 +707,11 @@ class WorkfileSettings(object):
frame_start = int(data["frameStart"]) - handle_start
frame_end = int(data["frameEnd"]) + handle_end
self._root_node["lock_range"].setValue(False)
self._root_node["fps"].setValue(fps)
self._root_node["first_frame"].setValue(frame_start)
self._root_node["last_frame"].setValue(frame_end)
self._root_node["lock_range"].setValue(True)
# setting active viewers
try:
@@ -1195,6 +1198,354 @@ class BuildWorkfile(WorkfileSettings):
def position_up(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
class ExporterReview:
"""
Base class object for generating review data from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
    def __init__(self,
                 klass,
                 instance
                 ):
        self.log = klass.log
        self.instance = instance
        # instance attributes rather than shared mutable class
        # attributes, so repeated exports don't leak state between runs
        self._temp_nodes = []
        self.data = dict({
            "representations": list()
        })
        self.path_in = self.instance.data.get("path", None)
        self.staging_dir = self.instance.data["stagingDir"]
        self.collection = self.instance.data.get("collection", None)
def get_file_info(self):
if self.collection:
self.log.debug("Collection: `{}`".format(self.collection))
# get path
self.fname = os.path.basename(self.collection.format(
"{head}{padding}{tail}"))
self.fhead = self.collection.format("{head}")
# get first and last frame
self.first_frame = min(self.collection.indexes)
self.last_frame = max(self.collection.indexes)
if "slate" in self.instance.data["families"]:
self.first_frame += 1
else:
self.fname = os.path.basename(self.path_in)
self.fhead = os.path.splitext(self.fname)[0] + "."
self.first_frame = self.instance.data.get("frameStart", None)
self.last_frame = self.instance.data.get("frameEnd", None)
if "#" in self.fhead:
self.fhead = self.fhead.replace("#", "")[:-1]
def get_representation_data(self, tags=None, range=False):
add_tags = []
if tags:
add_tags = tags
repre = {
'name': self.name,
'ext': self.ext,
'files': self.file,
"stagingDir": self.staging_dir,
"anatomy_template": "publish",
"tags": [self.name.replace("_", "-")] + add_tags
}
if range:
repre.update({
"frameStart": self.first_frame,
"frameEnd": self.last_frame,
})
self.data["representations"].append(repre)
def get_view_process_node(self):
"""
Will get any active view process.
Arguments:
self (class): in object definition
Returns:
nuke.Node: copy node of Input Process node
"""
anlib.reset_selection()
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
        if ipn_orig:
            # copy the original input process node to clipboard
            nuke.nodeCopy('%clipboard%')
            # reset selection
            anlib.reset_selection()
            # paste the node; the pasted copy is the only selection
            nuke.nodePaste('%clipboard%')
            # return the pasted copy
            return nuke.selectedNode()

        return None
    def clean_nodes(self):
        for node in self._temp_nodes:
            nuke.delete(node)
        self._temp_nodes = []
        self.log.info("Deleted nodes...")
class ExporterReviewLut(ExporterReview):
"""
Generator object for review lut from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
def __init__(self,
klass,
instance,
name=None,
ext=None,
cube_size=None,
lut_size=None,
lut_style=None):
# initialize parent class
ExporterReview.__init__(self, klass, instance)
        # take the plugin's `viewer_lut_raw` preset if defined, else False
if hasattr(klass, "viewer_lut_raw"):
self.viewer_lut_raw = klass.viewer_lut_raw
else:
self.viewer_lut_raw = False
self.name = name or "baked_lut"
self.ext = ext or "cube"
self.cube_size = cube_size or 32
self.lut_size = lut_size or 1024
self.lut_style = lut_style or "linear"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def generate_lut(self):
# ---------- start nodes creation
# CMSTestPattern
cms_node = nuke.createNode("CMSTestPattern")
cms_node["cube_size"].setValue(self.cube_size)
# connect
self._temp_nodes.append(cms_node)
self.previous_node = cms_node
self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
# Node View Process
ipn = self.get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
gen_lut_node["file"].setValue(self.path)
gen_lut_node["file_type"].setValue(".{}".format(self.ext))
gen_lut_node["lut1d"].setValue(self.lut_size)
gen_lut_node["style1d"].setValue(self.lut_style)
# connect
gen_lut_node.setInput(0, self.previous_node)
self._temp_nodes.append(gen_lut_node)
self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# Export lut file
nuke.execute(
gen_lut_node.name(),
int(self.first_frame),
int(self.first_frame))
self.log.info("Exported...")
# ---------- generate representation data
self.get_representation_data()
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
self.clean_nodes()
return self.data
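
# A minimal sketch of how an extractor plugin might drive this class
# (`self` stands for the pyblish plugin instance, `instance` for the
# processed pyblish instance):
#
#     exporter = ExporterReviewLut(self, instance)
#     data = exporter.generate_lut()
#     instance.data["representations"] += data["representations"]
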
class ExporterReviewMov(ExporterReview):
"""
    Generator object for review mov files from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
def __init__(self,
klass,
instance,
name=None,
ext=None,
):
# initialize parent class
ExporterReview.__init__(self, klass, instance)
# passing presets for nodes to self
if hasattr(klass, "nodes"):
self.nodes = klass.nodes
else:
self.nodes = {}
        # take the plugin's `viewer_lut_raw` preset if defined, else False
if hasattr(klass, "viewer_lut_raw"):
self.viewer_lut_raw = klass.viewer_lut_raw
else:
self.viewer_lut_raw = False
self.name = name or "baked"
self.ext = ext or "mov"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def render(self, render_node_name):
self.log.info("Rendering... ")
# Render Write node
nuke.execute(
render_node_name,
int(self.first_frame),
int(self.last_frame))
self.log.info("Rendered...")
def save_file(self):
import shutil
with anlib.maintained_selection():
self.log.info("Saving nodes as file... ")
# create nk path
path = os.path.splitext(self.path)[0] + ".nk"
# save file to the path
shutil.copyfile(self.instance.context.data["currentFile"], path)
self.log.info("Nodes exported...")
return path
def generate_mov(self, farm=False):
# ---------- start nodes creation
# Read node
r_node = nuke.createNode("Read")
r_node["file"].setValue(self.path_in)
r_node["first"].setValue(self.first_frame)
r_node["origfirst"].setValue(self.first_frame)
r_node["last"].setValue(self.last_frame)
r_node["origlast"].setValue(self.last_frame)
# connect
self._temp_nodes.append(r_node)
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes))
# View Process node
ipn = self.get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay node
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(self.path)
write_node["file_type"].setValue(self.ext)
write_node["meta_codec"].setValue("ap4h")
write_node["mov64_codec"].setValue("ap4h")
write_node["mov64_write_timecode"].setValue(1)
write_node["raw"].setValue(1)
# connect
write_node.setInput(0, self.previous_node)
self._temp_nodes.append(write_node)
self.log.debug("Write... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# ---------- render or save to nk
if farm:
nuke.scriptSave()
path_nk = self.save_file()
self.data.update({
"bakeScriptPath": path_nk,
"bakeWriteNodeName": write_node.name(),
"bakeRenderPath": self.path
})
else:
self.render(write_node.name())
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"],
range=True
)
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
self.clean_nodes()
nuke.scriptSave()
return self.data
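
# Equivalent sketch for the mov exporter; with `farm=True` nothing is
# rendered locally and the baking script info is handed over to the
# farm submission instead:
#
#     exporter = ExporterReviewMov(self, instance)
#     data = exporter.generate_mov(farm=True)
#     instance.data["representations"] += data.pop("representations")
#     instance.data.update(data)  # bakeScriptPath, bakeRenderPath, ...
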
def get_dependent_nodes(nodes):
"""Get all dependent nodes connected to the list of nodes.
@@ -1230,3 +1581,70 @@
})
return connections_in, connections_out
def find_free_space_to_paste_nodes(
nodes,
group=nuke.root(),
direction="right",
offset=300):
"""
For getting coordinates in DAG (node graph) for placing new nodes
Arguments:
nodes (list): list of nuke.Node objects
group (nuke.Node) [optional]: object in which context it is
direction (str) [optional]: where we want it to be placed
[left, right, top, bottom]
offset (int) [optional]: what offset it is from rest of nodes
Returns:
        xpos (int): x coordinate in DAG
        ypos (int): y coordinate in DAG
"""
if len(nodes) == 0:
return 0, 0
group_xpos = list()
group_ypos = list()
# get local coordinates of all nodes
nodes_xpos = [n.xpos() for n in nodes] + \
[n.xpos() + n.screenWidth() for n in nodes]
nodes_ypos = [n.ypos() for n in nodes] + \
[n.ypos() + n.screenHeight() for n in nodes]
# get complete screen size of all nodes to be placed in
nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
    nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
# get screen size (r,l,t,b) of all nodes in `group`
with group:
group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
[n.xpos() + n.screenWidth() for n in nuke.allNodes()
if n not in nodes]
group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
[n.ypos() + n.screenHeight() for n in nuke.allNodes()
if n not in nodes]
# calc output left
if direction in "left":
xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output right
if direction in "right":
xpos = max(group_xpos) + abs(offset)
ypos = min(group_ypos)
return xpos, ypos
# calc output top
if direction in "top":
xpos = min(group_xpos)
ypos = min(group_ypos) - abs(nodes_screen_heigth) - abs(offset)
return xpos, ypos
# calc output bottom
if direction in "bottom":
xpos = min(group_xpos)
ypos = max(group_ypos) + abs(offset)
return xpos, ypos
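
# Usage sketch: find room to the right of freshly pasted nodes and move
# them there (the layout values are illustrative):
#
#     nuke.nodePaste('%clipboard%')
#     pasted = nuke.selectedNodes()
#     xpos, ypos = find_free_space_to_paste_nodes(
#         pasted, direction="right", offset=300
#     )
#     for i, node in enumerate(pasted):
#         node.setXYpos(xpos, ypos + i * 100)
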

View file

@@ -22,19 +22,16 @@ def has_unsaved_changes():
def save_file(filepath):
file = os.path.basename(filepath)
project = hiero.core.projects()[-1]
# close `Untitled` project
if "Untitled" not in project.name():
log.info("Saving project: `{}`".format(project.name()))
if project:
log.info("Saving project: `{}` as '{}'".format(project.name(), file))
project.saveAs(filepath)
elif not project:
else:
log.info("Creating new project...")
project = hiero.core.newProject()
project.saveAs(filepath)
else:
log.info("Dropping `Untitled` project...")
return
def open_file(filepath):

View file

@@ -0,0 +1,32 @@
"""Create a model asset."""
import bpy
from avalon import api
from avalon.blender import Creator, lib
class CreateModel(Creator):
"""Polygonal static geometry"""
name = "modelMain"
label = "Model"
family = "model"
icon = "cube"
def process(self):
import pype.blender
asset = self.data["asset"]
subset = self.data["subset"]
name = pype.blender.plugin.model_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.objects.link(obj)
return collection

View file

@@ -0,0 +1,315 @@
"""Load a model asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import avalon.blender.pipeline
import bpy
import pype.blender
from avalon import api
logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
class BlendModelLoader(pype.blender.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
Warning:
        Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
@staticmethod
def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
"""Find the collection(s) with name, loaded from libpath.
Note:
It is assumed that only 1 matching collection is found.
"""
for collection in bpy.data.collections:
if collection.name != name:
continue
if collection.library is None:
continue
if not collection.library.filepath:
continue
collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
if collection_lib_path == normalized_libpath:
return collection
return None
@staticmethod
def _collection_contains_object(
collection: bpy.types.Collection, object: bpy.types.Object
) -> bool:
"""Check if the collection contains the object."""
for obj in collection.objects:
if obj == object:
return True
return False
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = pype.blender.plugin.model_name(asset, subset)
container_name = pype.blender.plugin.model_name(
asset, subset, namespace
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
if not instance_empty.get("avalon"):
instance_empty["avalon"] = dict()
avalon_info = instance_empty["avalon"]
avalon_info.update({"container_name": container_name})
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
container = bpy.data.collections[lib_container]
container.name = container_name
instance_empty.instance_collection = container
container.make_local()
avalon.blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
nodes = list(container.objects)
nodes.append(container)
nodes.append(instance_empty)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.debug(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_libpath = (
self._get_library_from_container(collection).filepath
)
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
# Let Blender's garbage collection take care of removing the library
# itself after removing the objects.
objects_to_remove = set()
collection_objects = list()
collection_objects[:] = collection.objects
for obj in collection_objects:
# Unlink every object
collection.objects.unlink(obj)
remove_obj = True
for coll in [
coll for coll in bpy.data.collections
if coll != collection
]:
if (
coll.objects and
self._collection_contains_object(coll, obj)
):
remove_obj = False
if remove_obj:
objects_to_remove.add(obj)
for obj in objects_to_remove:
# Only delete objects that are not used elsewhere
bpy.data.objects.remove(obj)
instance_empties = [
obj for obj in collection.users_dupli_group
if obj.name in collection.name
]
if instance_empties:
instance_empty = instance_empties[0]
container_name = instance_empty["avalon"]["container_name"]
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [container_name]
new_collection = self._get_lib_collection(container_name, libpath)
if new_collection is None:
raise ValueError(
"A matching collection '{container_name}' "
"should have been found in: {libpath}"
)
for obj in new_collection.objects:
collection.objects.link(obj)
bpy.data.collections.remove(new_collection)
# Update the representation on the collection
avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
avalon_prop["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (avalon-core:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
instance_parents = list(collection.users_dupli_group)
instance_objects = list(collection.objects)
for obj in instance_objects + instance_parents:
bpy.data.objects.remove(obj)
bpy.data.collections.remove(collection)
return True
class CacheModelLoader(pype.blender.AssetLoader):
"""Load cache models.
Stores the imported asset in a collection named after the asset.
Note:
At least for now it only supports Alembic files.
"""
families = ["model"]
representations = ["abc"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
raise NotImplementedError("Loading of Alembic files is not yet implemented.")
# TODO (jasper): implement Alembic import.
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
# TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
lib_container = container_name = (
pype.blender.plugin.model_name(asset, subset, namespace)
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (data_from, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
collection = bpy.data.collections[lib_container]
collection.name = container_name
instance_empty.instance_collection = collection
nodes = list(collection.objects)
nodes.append(collection)
nodes.append(instance_empty)
self[:] = nodes
return nodes
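# A minimal sketch of the link-and-instance pattern process_asset() builds
# on, reduced to plain bpy calls. The library path and collection name are
# hypothetical; `link=True` links the data-block instead of appending it.
import bpy

libpath = "/path/to/asset.blend"  # hypothetical published .blend
with bpy.data.libraries.load(libpath, link=True) as (_, data_to):
    data_to.collections = ["hero_modelDefault"]  # request collection by name

# instance the linked collection via an empty, exactly as above
empty = bpy.data.objects.new("hero_modelDefault", None)
empty.instance_type = 'COLLECTION'
empty.instance_collection = bpy.data.collections["hero_modelDefault"]
bpy.context.scene.collection.objects.link(empty)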

View file

@ -0,0 +1,16 @@
import bpy
import pyblish.api
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ['blender']
def process(self, context):
"""Inject the current working file"""
current_file = bpy.data.filepath
context.data['currentFile'] = current_file

View file

@ -0,0 +1,53 @@
import typing
from typing import Generator
import bpy
import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
class CollectModel(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Model"
order = pyblish.api.CollectorOrder
@staticmethod
def get_model_collections() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if (avalon_prop.get('family') == 'model'
and not avalon_prop.get('representation')):
yield collection
def process(self, context):
"""Collect the models from the current Blender scene."""
collections = self.get_model_collections()
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
members = list(collection.objects)
members.append(collection)
instance[:] = members
self.log.debug(instance.data)
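# For reference, an illustrative shape of the avalon property dict the
# collector reads above; the keys mirror what the code accesses, the values
# are hypothetical. "representation" only appears on loaded containers,
# which is why its absence marks a publishable instance:
#
#     collection[AVALON_PROPERTY] = {
#         "family": "model",
#         "asset": "hero",
#         "subset": "modelDefault",
#         "task": "modeling",
#     }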

View file

@ -0,0 +1,47 @@
import os
import avalon.blender.workio
import pype.api
class ExtractModel(pype.api.Extractor):
"""Extract as model."""
label = "Model"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s", instance.name, representation)

View file

@ -0,0 +1,49 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh Has UV's"
actions = [pype.blender.action.SelectInvalidAction]
optional = True
@staticmethod
def has_uvs(obj: bpy.types.Object) -> bool:
"""Check if an object has uv's."""
if not obj.data.uv_layers:
return False
for uv_layer in obj.data.uv_layers:
for polygon in obj.data.polygons:
for loop_index in polygon.loop_indices:
if not uv_layer.data[loop_index].uv:
return False
return True
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")

View file

@ -0,0 +1,35 @@
from typing import List
import bpy
import pyblish.api
import pype.blender.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
label = "Mesh No Negative Scale"
actions = [pype.blender.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
if any(v < 0 for v in obj.scale):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Meshes found in instance with negative scale: {invalid}"
)

View file

@ -144,8 +144,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
"version": 0,
"asset": asset_entity,
}
assetversion_data.update(data.get("assetversion_data", {}))
_assetversion_data = data.get("assetversion_data", {})
assetversion_cust_attrs = _assetversion_data.pop(
"custom_attributes", {}
)
assetversion_data.update(_assetversion_data)
assetversion_entity = session.query(
self.query("AssetVersion", assetversion_data)
@ -182,6 +185,22 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
# Adding Custom Attributes
for attr, val in assetversion_cust_attrs.items():
if attr in assetversion_entity["custom_attributes"]:
try:
assetversion_entity["custom_attributes"][attr] = val
session.commit()
continue
except Exception:
session.rollback()
self.log.warning((
"Custom Attrubute \"{0}\""
" is not available for AssetVersion <{1}>."
" Can't set it's value to: \"{2}\""
).format(attr, assetversion_entity["id"], str(val)))
# Have to commit the version and asset, because location can't
# determine the final location without.
try:

View file

@ -125,6 +125,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"thumbnail": comp['thumbnail']
}
# Add custom attributes for AssetVersion
assetversion_cust_attrs = {}
intent_val = instance.context.data.get("intent")
if intent_val:
assetversion_cust_attrs["intent"] = intent_val
component_item["assetversion_data"]["custom_attributes"] = (
assetversion_cust_attrs
)
componentList.append(component_item)
# Create copy with ftrack.unmanaged location if thumb or prev
if comp.get('thumbnail') or comp.get('preview') \

View file

@ -12,7 +12,6 @@ import os
import re
import copy
import json
from pprint import pformat
import pyblish.api
from avalon import api
@ -54,10 +53,6 @@ def collect(root,
patterns=[pattern],
minimum_items=1)
# Ignore any remainders
if remainder:
print("Skipping remainder {}".format(remainder))
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
@ -71,7 +66,7 @@ def collect(root,
# Keep only collections that have at least a single frame
collections = [c for c in collections if c.indexes]
return collections
return collections, remainder
class CollectRenderedFrames(pyblish.api.ContextPlugin):
@ -95,11 +90,21 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "RenderedFrames"
def process(self, context):
pixel_aspect = 1
resolution_width = 1920
resolution_height = 1080
lut_path = None
slate_frame = None
families_data = None
instance_family = None
baked_mov_path = None
subset = None
version = None
frame_start = 0
frame_end = 0
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
self.log.info("Collecting paths: {}".format(paths))
@ -117,12 +122,18 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
try:
data = json.load(f)
except Exception as exc:
self.log.error("Error loading json: "
"{} - Exception: {}".format(path, exc))
self.log.error(
"Error loading json: "
"{} - Exception: {}".format(path, exc)
)
raise
cwd = os.path.dirname(path)
root_override = data.get("root")
frame_start = int(data.get("frameStart"))
frame_end = int(data.get("frameEnd"))
subset = data.get("subset")
if root_override:
if os.path.isabs(root_override):
root = root_override
@ -144,6 +155,18 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
self.log.info("setting session using metadata")
api.Session.update(session)
os.environ.update(session)
instance = metadata.get("instance")
if instance:
instance_family = instance.get("family")
pixel_aspect = instance.get("pixelAspect", 1)
resolution_width = instance.get("resolutionWidth", 1920)
resolution_height = instance.get("resolutionHeight", 1080)
lut_path = instance.get("lutPath", None)
baked_mov_path = instance.get("bakeRenderPath")
families_data = instance.get("families")
slate_frame = instance.get("slateFrame")
version = instance.get("version")
else:
# Search in directory
@ -151,88 +174,273 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
root = path
self.log.info("Collecting: {}".format(root))
regex = data.get("regex")
if baked_mov_path:
regex = "^{}.*$".format(subset)
if regex:
self.log.info("Using regex: {}".format(regex))
collections = collect(root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
frame_start=data.get("frameStart"),
frame_end=data.get("frameEnd"))
if "slate" in families_data:
frame_start -= 1
collections, remainder = collect(
root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
frame_start=frame_start,
frame_end=frame_end,
)
self.log.info("Found collections: {}".format(collections))
if data.get("subset"):
# If subset is provided for this json then it must be a single
# collection.
if len(collections) > 1:
self.log.error("Forced subset can only work with a single "
"found sequence")
raise RuntimeError("Invalid sequence")
self.log.info("Found remainder: {}".format(remainder))
fps = data.get("fps", 25)
if data.get("user"):
context.data["user"] = data["user"]
if data.get("version"):
version = data.get("version")
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
families.append("render")
if "ftrack" not in families:
families.append("ftrack")
if "review" not in families:
families.append("review")
if "write" in instance_family:
families.append("write")
if families_data and "slate" in families_data:
families.append("slate")
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Collection: %s" % list(collection))
if data.get("attachTo"):
# we need to attach found collections to existing
# subset version as review representation.
# Ensure each instance gets a unique reference to the data
for attach in data.get("attachTo"):
self.log.info(
"Attaching render {}:v{}".format(
attach["subset"], attach["version"]))
instance = context.create_instance(
attach["subset"])
instance.data.update(
{
"name": attach["subset"],
"version": attach["version"],
"family": 'review',
"families": ['review', 'ftrack'],
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height
})
if "representations" not in instance.data:
instance.data["representations"] = []
for collection in collections:
self.log.info(
" - adding representation: {}".format(
str(collection))
)
ext = collection.tail.lstrip(".")
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
}
instance.data["representations"].append(
representation)
elif subset:
# if we have a subset - add all collections and the known
# remainder as representations
# take out review family if a baked mov path exists;
# this makes the image sequence non-review
if baked_mov_path:
self.log.info(
"Baked mov is available {}".format(
baked_mov_path))
families.append("review")
if session['AVALON_APP'] == "maya":
families.append("review")
self.log.info(
"Adding representations to subset {}".format(
subset))
instance = context.create_instance(subset)
data = copy.deepcopy(data)
# If no subset provided, get it from collection's head
subset = data.get("subset", collection.head.rstrip("_. "))
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
# root = os.path.normpath(root)
# self.log.info("Source: {}}".format(data.get("source", "")))
ext = list(collection)[0].split('.')[-1]
instance.data.update({
"name": str(collection),
"family": families[0], # backwards compatibility / pyblish
"families": list(families),
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get('source', '')
})
instance.append(collection)
instance.context.data['fps'] = fps
instance.data.update(
{
"name": subset,
"family": families[0],
"families": list(families),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"slateFrame": slate_frame,
"version": version
}
)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': ext,
'ext': '{}'.format(ext),
'files': list(collection),
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ['review']
}
instance.data["representations"].append(representation)
for collection in collections:
self.log.info(" - {}".format(str(collection)))
if data.get('user'):
context.data["user"] = data['user']
ext = collection.tail.lstrip(".")
self.log.debug("Collected instance:\n"
"{}".format(pformat(instance.data)))
if "slate" in instance.data["families"]:
frame_start += 1
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"] if not baked_mov_path else [],
}
instance.data["representations"].append(
representation)
# filter out only relevant mov in case baked available
self.log.debug("__ remainder {}".format(remainder))
if baked_mov_path:
remainder = [r for r in remainder
if r in baked_mov_path]
self.log.debug("__ remainder {}".format(remainder))
# process remainders
for rem in remainder:
# add only known types to representation
if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
self.log.info(" . {}".format(rem))
if "slate" in instance.data["families"]:
frame_start += 1
tags = ["review"]
if baked_mov_path:
tags.append("delete")
representation = {
"name": rem.split(".")[-1],
"ext": "{}".format(rem.split(".")[-1]),
"files": rem,
"stagingDir": root,
"frameStart": frame_start,
"anatomy_template": "render",
"fps": fps,
"tags": tags
}
instance.data["representations"].append(
representation)
else:
# we have no subset so we take every collection and create one
# from it
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Creating subset from: %s" % str(collection))
# Ensure each instance gets a unique reference to the data
data = copy.deepcopy(data)
# If no subset provided, get it from collection's head
subset = data.get("subset", collection.head.rstrip("_. "))
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
ext = list(collection)[0].split(".")[-1]
if "review" not in families:
families.append("review")
instance.data.update(
{
"name": str(collection),
"family": families[0], # backwards compatibility
"families": list(families),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"version": version
}
)
if lut_path:
instance.data.update({"lutPath": lut_path})
instance.append(collection)
instance.context.data["fps"] = fps
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
}
instance.data["representations"].append(representation)
# temporary ... allow only beauty on ftrack
if session['AVALON_APP'] == "maya":
AOV_filter = ['beauty']
for aov in AOV_filter:
if aov not in instance.data['subset']:
instance.data['families'].remove('review')
instance.data['families'].remove('ftrack')
representation["tags"].remove('review')
self.log.debug(
"__ representations {}".format(
instance.data["representations"]))
self.log.debug(
"__ instance.data {}".format(instance.data))

View file

@ -31,32 +31,44 @@ class CollectTemplates(pyblish.api.InstancePlugin):
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
@ -75,8 +87,19 @@ class CollectTemplates(pyblish.api.InstancePlugin):
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP")}
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
@ -85,3 +108,6 @@ class CollectTemplates(pyblish.api.InstancePlugin):
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
self.log.info("Assumed Destination has been created...")
self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
self.log.debug("__ template: `{}`".format(instance.data["template"]))

View file

@ -1,8 +1,10 @@
import os
import json
import copy
import pype.api
import pyblish
from pypeapp import config
class ExtractBurnin(pype.api.Extractor):
@ -24,14 +26,12 @@ class ExtractBurnin(pype.api.Extractor):
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
# TODO: expand burnin data list to include all usefull keys
version = ''
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])
version = instance.context.data.get(
'version', instance.data.get('version'))
frame_start = int(instance.data.get("frameStart") or 0)
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
@ -39,8 +39,37 @@ class ExtractBurnin(pype.api.Extractor):
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": version
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
prep_data.update(config.get_datetime_data())
slate_frame_start = frame_start
slate_frame_end = frame_end
slate_duration = duration
# exception for slate workflow
if "slate" in instance.data["families"]:
slate_frame_start = frame_start - 1
slate_frame_end = frame_end
slate_duration = slate_frame_end - slate_frame_start + 1
prep_data.update({
"slate_frame_start": slate_frame_start,
"slate_frame_end": slate_frame_end,
"slate_duration": slate_duration
})
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
# get anatomy project
anatomy = instance.context.data['anatomy']
self.log.debug("__ prep_data: {}".format(prep_data))
for i, repre in enumerate(instance.data["representations"]):
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
@ -52,7 +81,8 @@ class ExtractBurnin(pype.api.Extractor):
filename = "{0}".format(repre["files"])
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
ext = os.path.splitext(filename)[1]
movieFileBurnin = filename.replace(ext, "") + name + ext
full_movie_path = os.path.join(
os.path.normpath(stagingdir), repre["files"]
@ -62,11 +92,17 @@ class ExtractBurnin(pype.api.Extractor):
)
self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
# create copy of prep_data for anatomy formatting
_prep_data = copy.deepcopy(prep_data)
_prep_data["representation"] = repre["name"]
_prep_data["anatomy"] = (
anatomy.format_all(_prep_data).get("solved") or {}
)
burnin_data = {
"input": full_movie_path.replace("\\", "/"),
"codec": repre.get("codec", []),
"output": full_burnin_path.replace("\\", "/"),
"burnin_data": prep_data
"burnin_data": _prep_data
}
self.log.debug("__ burnin_data2: {}".format(burnin_data))

View file

@ -20,6 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
hosts = ["shell"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
enabled = False
def process(self, instance):
start = instance.data.get("frameStart")
@ -28,51 +29,74 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
input_file = (
collections[0].format('{head}{padding}{tail}') % start
)
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
self.log.info("subset {}".format(instance.data['subset']))
if 'crypto' in instance.data['subset']:
return
filename = collections[0].format('{head}')
if not filename.endswith('.'):
filename += "."
jpegFile = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpegFile)
# get representation and loop them
representations = instance.data["representations"]
self.log.info("output {}".format(full_output_path))
# filter out mov and img sequences
representations_new = representations[:]
config_data = instance.context.data['output_repre_config']
for repre in representations:
self.log.debug(repre)
if 'review' not in repre['tags']:
return
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
input_file = repre['files'][0]
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start
# )
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
subprocess_jpeg = " ".join(jpeg_items)
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpegFile = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpegFile)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
self.log.info("output {}".format(full_output_path))
if "representations" not in instance.data:
instance.data["representations"] = []
config_data = instance.context.data['output_repre_config']
representation = {
'name': 'jpg',
'ext': 'jpg',
'files': jpegFile,
"stagingDir": stagingdir,
"thumbnail": True
}
instance.data["representations"].append(representation)
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
subprocess_jpeg = " ".join(jpeg_items)
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'jpg',
'ext': 'jpg',
'files': jpegFile,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ['thumbnail']
}
# adding representation
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
instance.data["representations"] = representations_new

View file

@ -1,9 +1,7 @@
import os
import pyblish.api
import clique
import pype.api
from pypeapp import config
class ExtractReview(pyblish.api.InstancePlugin):
@ -22,27 +20,35 @@ class ExtractReview(pyblish.api.InstancePlugin):
families = ["review"]
hosts = ["nuke", "maya", "shell"]
outputs = {}
ext_filter = []
def process(self, instance):
# adding plugin attributes from presets
publish_presets = config.get_presets()["plugins"]["global"]["publish"]
plugin_attrs = publish_presets[self.__class__.__name__]
output_profiles = plugin_attrs.get("outputs", {})
to_width = 1920
to_height = 1080
output_profiles = self.outputs or {}
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("frameStart")
self.log.debug("Families In: `{}`".format(instance.data["families"]))
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(inst_data["families"]))
# get representation and loop them
representations = instance.data["representations"]
representations = inst_data["representations"]
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] in plugin_attrs["ext_filter"]:
if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
@ -54,10 +60,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
if not ext:
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
@ -92,8 +102,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.info("p_tags: `{}`".format(p_tags))
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
for t in p_tags
if t not in instance.data["families"]]
# add to
[new_tags.append(t) for t in p_tags
if t not in new_tags]
@ -111,8 +122,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
@ -147,22 +159,135 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
output_args = []
output_args.extend(profile.get('codec', []))
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
# TODO: add to documentation
lb = profile.get('letter_box', None)
if lb:
output_args.append(
"-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpeg_width = to_width
ffmpeg_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio != delivery_ratio:
ffmpeg_width = resolution_width
ffmpeg_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
# remove gamma info as it is all baked into the lut
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
mov_args = [
os.path.join(
os.environ.get(
@ -185,7 +310,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": profile.get('codec', [])
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if repre_new.get('preview'):
repre_new.pop("preview")
@ -209,3 +337,40 @@ class ExtractReview(pyblish.api.InstancePlugin):
instance.data["representations"] = representations_new
self.log.debug("Families Out: `{}`".format(instance.data["families"]))
def add_video_filter_args(self, args, inserting_arg):
"""
Fix video filter arguments to be one long string
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all video format settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip them from all flags
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
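# Worked example of the "reformat" branch above for a hypothetical
# 2048x858 (2.39:1) plate with square pixels delivered at 1920x1080:
to_width, to_height = 1920, 1080
resolution_width, resolution_height, pixel_aspect = 2048, 858, 1.0

resolution_ratio = resolution_width / (resolution_height * pixel_aspect)  # ~2.39
delivery_ratio = to_width / to_height                                     # ~1.78

# resolution_ratio > delivery_ratio -> fit width, pad height (letterbox)
width_scale, width_half_pad = to_width, 0
scale_factor = to_width / resolution_width              # 0.9375
height_scale = int(resolution_height * scale_factor)    # 804
height_half_pad = int((to_height - height_scale) / 2)   # 138

# -> "scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1"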

View file

@ -0,0 +1,243 @@
import os
import pype.api
import pyblish
class ExtractReviewSlate(pype.api.Extractor):
"""
Add a slate frame at the start of the video files.
"""
label = "Review with Slate frame"
order = pyblish.api.ExtractorOrder + 0.031
families = ["slate"]
hosts = ["nuke", "maya", "shell"]
optional = True
def process(self, instance):
inst_data = instance.data
if "representations" not in inst_data:
raise RuntimeError("Burnin needs already created mov to work on.")
suffix = "_slate"
slate_path = inst_data.get("slateFrame")
ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
to_width = 1920
to_height = 1080
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
ext = os.path.splitext(input_file)[1]
output_file = input_file.replace(ext, "") + suffix + ext
input_path = os.path.join(
os.path.normpath(stagingdir), repre["files"])
self.log.debug("__ input_path: {}".format(input_path))
_remove_at_end.append(input_path)
output_path = os.path.join(
os.path.normpath(stagingdir), output_file)
self.log.debug("__ output_path: {}".format(output_path))
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
# make sure colors are correct
output_args.extend([
"-vf scale=out_color_matrix=bt709",
"-color_primaries bt709",
"-color_trc bt709",
"-colorspace bt709"
])
# scaling none square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
_remove_at_end.append(slate_v_path)
slate_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
slate_subprcs_cmd = " ".join(slate_args)
# run slate generation subprocess
self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd))
slate_output = pype.api.subprocess(slate_subprcs_cmd)
self.log.debug("Slate Output: {}".format(slate_output))
# create ffmpeg concat text file path
conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt"
conc_text_path = os.path.join(
os.path.normpath(stagingdir), conc_text_file)
_remove_at_end.append(conc_text_path)
self.log.debug("__ conc_text_path: {}".format(conc_text_path))
new_line = "\n"
with open(conc_text_path, "w") as conc_text_f:
conc_text_f.writelines([
"file {}".format(
slate_v_path.replace("\\", "/")),
new_line,
"file {}".format(input_path.replace("\\", "/"))
])
# concat slate and videos together
conc_input_args = ["-y", "-f concat", "-safe 0"]
conc_input_args.append("-i {}".format(conc_text_path))
conc_output_args = ["-c copy"]
conc_output_args.append(output_path)
concat_args = [
ffmpeg_path,
" ".join(conc_input_args),
" ".join(conc_output_args)
]
concat_subprcs_cmd = " ".join(concat_args)
# ffmpeg concat subprocess
self.log.debug("Executing concat: {}".format(concat_subprcs_cmd))
concat_output = pype.api.subprocess(concat_subprcs_cmd)
self.log.debug("Output concat: {}".format(concat_output))
self.log.debug("__ repre[tags]: {}".format(repre["tags"]))
repre_update = {
"files": output_file,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
inst_data["representations"][i].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
# removing temp files
for f in _remove_at_end:
os.remove(f)
self.log.debug("Removed: `{}`".format(f))
# Remove any representations tagged for deletion.
for repre in inst_data.get("representations", []):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
inst_data["representations"].remove(repre)
self.log.debug(inst_data["representations"])
def add_video_filter_args(self, args, inserting_arg):
"""
Fix video filter arguments to be one long string
Args:
args (list): list of string arguments
inserting_arg (str): string argument we want to add
(without flag `-vf`)
Returns:
str: long joined argument to be added back to list of arguments
"""
# find all video format settings
vf_settings = [p for p in args
for v in ["-filter:v", "-vf"]
if v in p]
self.log.debug("_ vf_settings: `{}`".format(vf_settings))
# remove them from output args list
for p in vf_settings:
self.log.debug("_ remove p: `{}`".format(p))
args.remove(p)
self.log.debug("_ args: `{}`".format(args))
# strip them from all flags
vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
for p in vf_settings]
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
vf_fixed.insert(0, inserting_arg)
self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
# create new video filter setting
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
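# The concat step above uses ffmpeg's concat demuxer. A minimal stand-alone
# equivalent (paths hypothetical): "-f concat -safe 0" reads the list file,
# and "-c copy" stream-copies, which is why the slate clip is first encoded
# with the same codec settings as the review mov.
import subprocess

with open("concat.txt", "w") as f:
    f.write("file render_slate.mov\n")
    f.write("file render.mov\n")

subprocess.check_call(
    "ffmpeg -y -f concat -safe 0 -i concat.txt -c copy render_with_slate.mov"
    .split()
)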

View file

@ -84,9 +84,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -94,10 +96,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -318,9 +324,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -82,31 +82,40 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:

View file

@ -7,6 +7,7 @@ import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
@ -71,7 +72,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"yetiRig",
"yeticache",
"nukenodes",
"gizmo"
"gizmo",
"source",
"matchmove",
"image"
@ -152,9 +153,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -162,10 +165,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -174,16 +181,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
# self.log.info("Verifying version from assumed destination")
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# if assumed_version != next_version:
# raise AttributeError("Assumed version 'v{0:03d}' does not match"
# "next version in database "
# "('v{1:03d}')".format(assumed_version,
# next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
@ -269,6 +266,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"version": int(version["name"]),
"hierarchy": hierarchy}
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
@ -322,6 +330,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
repre.get("frameEnd")))
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
@ -356,7 +368,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head,
dst_start_frame,
dst_tail).replace("..", ".")
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
else:
# Single file
@ -385,7 +397,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data["transfers"].append([src, dst])
repre['published_path'] = dst
repre['published_path'] = self.unc_convert(dst)
self.log.debug("__ dst: {}".format(dst))
representation = {
@ -413,8 +425,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
}
}
if repre.get("outputName"):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % repre.get("frameStart")
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
@ -459,6 +474,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def unc_convert(self, path):
self.log.debug("> __ path: `{}`".format(path))
drive, _path = os.path.splitdrive(path)
self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path))
if not os.path.exists(drive + "/"):
self.log.info("Converting to unc from environments ..")
path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH")
path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
if "/" in path_mount:
path = path.replace(path_mount[0:-1], path_replace)
else:
path = path.replace(path_mount, path_replace)
return path
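# Hypothetical illustration of unc_convert(): when the local mount drive is
# absent, the mount prefix is swapped for the network (UNC) prefix from the
# environment, e.g. with
#     PYPE_STUDIO_PROJECTS_MOUNT = "P:/"
#     PYPE_STUDIO_PROJECTS_PATH  = "//studio/projects"
# a path like "P:/show/shot/publish/v001" becomes
# "//studio/projects/show/shot/publish/v001".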
def copy_file(self, src, dst):
""" Copy given source to destination
@ -468,8 +500,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Returns:
None
"""
src = os.path.normpath(src)
dst = os.path.normpath(dst)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
@ -490,6 +522,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
try:
os.makedirs(dirname)
except OSError as e:
@ -502,9 +538,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
@ -595,7 +633,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
"fps": context.data.get(
"fps", instance.data.get("fps"))}
# Include optional data if present in
optionals = [

View file

@ -88,9 +88,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
@ -98,10 +100,14 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
@ -251,9 +257,6 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
@ -332,9 +335,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]

View file

@ -21,20 +21,34 @@ def _get_script():
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
module_path = os.path.normpath(module_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
module_path = module_path.replace(mount_root, network_root)
return module_path
# Logic to retrieve latest files concerning extendFrames
def get_latest_version(asset_name, subset_name, family):
# Get asset
asset_name = io.find_one({"type": "asset",
"name": asset_name},
projection={"name": True})
asset_name = io.find_one(
{
"type": "asset",
"name": asset_name
},
projection={"name": True}
)
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]},
projection={"_id": True, "name": True})
subset = io.find_one(
{
"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]
},
projection={"_id": True, "name": True}
)
# Check if subsets actually exists (pre-run check)
assert subset, "No subsets found, please publish with `extendFrames` off"
@ -45,11 +59,15 @@ def get_latest_version(asset_name, subset_name, family):
"data.endFrame": True,
"parent": True}
version = io.find_one({"type": "version",
"parent": subset["_id"],
"data.families": family},
projection=version_projection,
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"],
"data.families": family
},
projection=version_projection,
sort=[("name", -1)]
)
assert version, "No version found, this is a bug"
@ -143,7 +161,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_ROOT"
"PYPE_ROOT",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
def _submit_deadline_post_job(self, instance, job):
@ -154,7 +174,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
data = instance.data.copy()
subset = data["subset"]
state = data.get("publishJobState", "Suspended")
job_name = "{batch} - {subset} [publish image sequence]".format(
batch=job["Props"]["Name"],
subset=subset
@ -164,6 +183,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
output_dir = instance.data["outputDir"]
metadata_path = os.path.join(output_dir, metadata_filename)
metadata_path = os.path.normpath(metadata_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH'])
metadata_path = metadata_path.replace(mount_root, network_root)
# Generate the payload for Deadline submission
payload = {
"JobInfo": {
@ -174,7 +199,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"InitialStatus": state,
"Priority": job["Props"]["Pri"]
},
"PluginInfo": {
@ -192,6 +216,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# job so they use the same environment
environment = job["Props"].get("Env", {})
i = 0
for index, key in enumerate(environment):
self.log.info("KEY: {}".format(key))
@ -282,6 +307,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
# find subsets and version to attach render to
attach_to = instance.data.get("attachTo")
attach_subset_versions = []
if attach_to:
for subset in attach_to:
for instance in context:
if instance.data["subset"] != subset["subset"]:
continue
attach_subset_versions.append(
{"version": instance.data["version"],
"subset": subset["subset"],
"family": subset["family"]})
# Write metadata for publish job
metadata = {
"asset": asset,
@ -293,6 +331,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"source": source,
"user": context.data["user"],
"version": context.data["version"],
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
# Optional metadata (for debugging)
"metadata": {
"instance": data,
@ -301,6 +341,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
}
if api.Session["AVALON_APP"] == "nuke":
metadata['subset'] = subset
if submission_type == "muster":
ftrack = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),

View file

@ -27,6 +27,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator):
return True
def process(self, instance):
self.log.info("ffmpeg path: `{}`".format(
os.environ.get("FFMPEG_PATH", "")))
if self.is_tool(
os.path.join(
os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False:

View file

@ -116,9 +116,11 @@ class LookLoader(pype.maya.plugin.ReferenceLoader):
shapes=True))
nodes = set(nodes_list)
json_representation = io.find_one({"type": "representation",
"parent": representation['parent'],
"name": "json"})
json_representation = io.find_one({
"type": "representation",
"parent": representation['parent'],
"name": "json"
})
# Load relationships
shader_relation = api.get_representation_path(json_representation)

View file

@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin):
objsets = instance.data['setMembers']
for objset in objsets:
objset = str(objset)
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if objset == "content_SET":
if "content_SET" in objset:
instance.data['setMembers'] = members
elif objset == "proxy_SET":
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
self.log.debug("data: {}".format(instance.data))

View file

@ -219,10 +219,6 @@ class CollectLook(pyblish.api.InstancePlugin):
with lib.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
# make ftrack publishable
self.maketx = instance.data.get('maketx', True)
instance.data['maketx'] = self.maketx
self.log.info('maketx: {}'.format(self.maketx))
def collect(self, instance):

View file

@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
texture_filenames = []
if image_search_paths:
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
image_search_paths.split(os.path.pathsep) if p]
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
image_search_paths = self._replace_tokens(image_search_paths)
# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
self.log.info("Found %i texture(s)" % len(texture_filenames))
@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
"atttribute'" % node)
# Collect all texture files
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
texture_filenames = self._replace_tokens(texture_filenames)
for texture in texture_filenames:
files = []
@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
collection, remainder = clique.assemble(files, patterns=pattern)
return collection
def _replace_tokens(self, strings):
env_re = re.compile(r"\$\{(\w+)\}")
replaced = []
for s in strings:
matches = re.finditer(env_re, s)
for m in matches:
try:
s = s.replace(m.group(), os.environ[m.group(1)])
except KeyError:
msg = "Cannot find requested {} in environment".format(
m.group(1))
self.log.error(msg)
raise RuntimeError(msg)
replaced.append(s)
return replaced
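For illustration, a minimal standalone sketch of the same ${TOKEN} substitution, assuming the hypothetical variable YETI_TEX is set:

import os
import re

os.environ["YETI_TEX"] = "/mnt/textures"  # hypothetical variable
pattern = re.compile(r"\$\{(\w+)\}")
path = pattern.sub(lambda m: os.environ[m.group(1)],
                   "${YETI_TEX}/fur/color.1001.tif")
# path == "/mnt/textures/fur/color.1001.tif"
# a missing variable raises KeyError, mirroring the plug-in's error path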

View file

@ -17,6 +17,7 @@ class ExtractAssStandin(pype.api.Extractor):
label = "Ass Standin (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
@ -47,7 +48,7 @@ class ExtractAssStandin(pype.api.Extractor):
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=self.asciiAss,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
@ -59,13 +60,15 @@ class ExtractAssStandin(pype.api.Extractor):
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
self.log.info("Extracting ass")
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
asciiAss=False,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
self.log.info("Extracted {}".format(filename))
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",

View file

@ -74,6 +74,8 @@ def maketx(source, destination, *args):
cmd.extend(args)
cmd.extend(["-o", destination, source])
cmd = " ".join(cmd)
CREATE_NO_WINDOW = 0x08000000
kwargs = dict(args=cmd, stderr=subprocess.STDOUT)
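CREATE_NO_WINDOW above is a Windows-only process creation flag; a hedged sketch of how it is typically passed to subprocess (the actual maketx call may differ):

import subprocess
import sys

CREATE_NO_WINDOW = 0x08000000  # Windows-only

def run_hidden(cmd):
    kwargs = dict(args=cmd, stderr=subprocess.STDOUT)
    if sys.platform == "win32":
        # suppress the console window that would otherwise flash up
        kwargs["creationflags"] = CREATE_NO_WINDOW
    return subprocess.check_output(**kwargs)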
@ -183,6 +185,7 @@ class ExtractLook(pype.api.Extractor):
transfers = list()
hardlinks = list()
hashes = dict()
forceCopy = instance.data.get("forceCopy", False)
self.log.info(files)
for filepath in files_metadata:
@ -195,20 +198,26 @@ class ExtractLook(pype.api.Extractor):
files_metadata[filepath]["color_space"] = "raw"
source, mode, hash = self._process_texture(
filepath, do_maketx, staging=dir_path, linearise=linearise
filepath,
do_maketx,
staging=dir_path,
linearise=linearise,
force=forceCopy
)
destination = self.resource_destination(instance,
source,
do_maketx)
# Force copy is specified.
if instance.data.get("forceCopy", False):
if forceCopy:
mode = COPY
if mode == COPY:
transfers.append((source, destination))
self.log.info('copying')
elif mode == HARDLINK:
hardlinks.append((source, destination))
self.log.info('hardlinking')
# Store the hashes from hash to destination to include in the
# database
@ -231,11 +240,12 @@ class ExtractLook(pype.api.Extractor):
# ensure after context it's still the original value.
color_space_attr = resource["node"] + ".colorSpace"
color_space = cmds.getAttr(color_space_attr)
if files_metadata[source]["color_space"] == "raw":
# set colorspace to Raw if we linearised it
color_space = "Raw"
# Remap file node filename to destination
attr = resource["attribute"]
remap[attr] = destinations[source]
remap[color_space_attr] = color_space
self.log.info("Finished remapping destinations ...")
@ -310,6 +320,12 @@ class ExtractLook(pype.api.Extractor):
# Source hash for the textures
instance.data["sourceHashes"] = hashes
"""
self.log.info("Returning colorspaces to their original values ...")
for attr, value in remap.items():
self.log.info(" - {}: {}".format(attr, value))
cmds.setAttr(attr, value, type="string")
"""
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
maya_path))
@ -330,7 +346,7 @@ class ExtractLook(pype.api.Extractor):
instance.data["assumedDestination"], "resources", basename + ext
)
def _process_texture(self, filepath, do_maketx, staging, linearise):
def _process_texture(self, filepath, do_maketx, staging, linearise, force):
"""Process a single texture file on disk for publishing.
This will:
1. Check whether it's already published, if so it will do hardlink
@ -352,7 +368,7 @@ class ExtractLook(pype.api.Extractor):
# If source has been published before with the same settings,
# then don't reprocess but hardlink from the original
existing = find_paths_by_hash(texture_hash)
if existing:
if existing and not force:
self.log.info("Found hash in database, preparing hardlink..")
source = next((p for p in existing if os.path.exists(p)), None)
if filepath:
@ -413,33 +429,42 @@ class ExtractLook(pype.api.Extractor):
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True},
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one(
{"type": "asset", "name": asset_name, "parent": project["_id"]}
)
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one(
{"type": "subset", "name": subset_name, "parent": asset["_id"]}
)
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)]
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
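The latest-version lookup used here recurs in several plug-ins below; a minimal standalone sketch with hypothetical names (`asset` is assumed to be an already-fetched asset document):

from avalon import io

subset = io.find_one({
    "type": "subset",
    "name": "lookMain",  # hypothetical subset name
    "parent": asset["_id"]
})
latest = None
if subset is not None:
    latest = io.find_one(
        {"type": "version", "parent": subset["_id"]},
        sort=[("name", -1)]  # highest version number first
    )
version_number = latest["name"] + 1 if latest else 1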

View file

@ -38,9 +38,13 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin):
invalid = list()
asset = instance.data['asset']
asset_data = io.find_one({"name": asset,
"type": "asset"},
projection={"_id": True})
asset_data = io.find_one(
{
"name": asset,
"type": "asset"
},
projection={"_id": True}
)
asset_id = str(asset_data['_id'])
# We do want to check the referenced nodes as they might be

View file

@ -49,9 +49,10 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin):
"""Check if subset is registered in the database under the asset"""
asset = io.find_one({"type": "asset", "name": asset_name})
is_valid = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
is_valid = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
return is_valid

View file

@ -6,9 +6,6 @@ from pype import api as pype
import nuke
log = pype.Logger().get_logger(__name__, "nuke")
class CrateRead(avalon.nuke.Creator):
# change this to template preset
name = "ReadCopy"

View file

@ -1,8 +0,0 @@
# create publishable read node usually used for enabling version tracking
# also useful for sharing across shots or assets
# if read nodes are selected it will convert them to container
# if no read node is selected it will create a read node and offer a browser to the shot resource folder
# type movie > mov or image sequence
# type still > matte paint .psd, .tif, .png

View file

@ -1,22 +1,14 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
from pype import api as pype
from pype.nuke import plugin
from pypeapp import config
import nuke
log = pype.Logger().get_logger(__name__, "nuke")
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
nClass = "write"
n_class = "write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
@ -27,14 +19,15 @@ class CreateWriteRender(plugin.PypeCreator):
data = OrderedDict()
data["family"] = self.family
data["families"] = self.nClass
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.log.info("self.data: '{}'".format(self.data))
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
@ -46,9 +39,11 @@ class CreateWriteRender(plugin.PypeCreator):
# use selection
if (self.options or {}).get("useSelection"):
nodes = nuke.selectedNodes()
nodes = self.nodes
assert len(nodes) == 1, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`")
assert len(nodes) < 2, self.log.error(
"Select only one node. The node you want to connect to, "
"or tick off `Use selection`")
selected_node = nodes[0]
inputs = [selected_node]
@ -69,7 +64,7 @@ class CreateWriteRender(plugin.PypeCreator):
# recreate new
write_data = {
"class": self.nClass,
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
@ -100,75 +95,117 @@ class CreateWriteRender(plugin.PypeCreator):
return write_node
#
# class CreateWritePrerender(avalon.nuke.Creator):
# # change this to template preset
# preset = "prerender"
#
# name = "WritePrerender"
# label = "Create Write Prerender"
# hosts = ["nuke"]
# family = "{}_write".format(preset)
# families = preset
# icon = "sign-out"
# defaults = ["Main", "Mask"]
#
# def __init__(self, *args, **kwargs):
# super(CreateWritePrerender, self).__init__(*args, **kwargs)
# self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
# self.__class__.__name__, {}
# )
#
# data = OrderedDict()
#
# data["family"] = self.family.split("_")[1]
# data["families"] = self.families
#
# {data.update({k: v}) for k, v in self.data.items()
# if k not in data.keys()}
# self.data = data
#
# def process(self):
# self.name = self.data["subset"]
#
# instance = nuke.toNode(self.data["subset"])
# node = 'write'
#
# if not instance:
# write_data = {
# "class": node,
# "preset": self.preset,
# "avalon": self.data
# }
#
# if self.presets.get('fpath_template'):
# self.log.info("Adding template path from preset")
# write_data.update(
# {"fpath_template": self.presets["fpath_template"]}
# )
# else:
# self.log.info("Adding template path from plugin")
# write_data.update({
# "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
#
# # get group node
# group_node = create_write_node(self.data["subset"], write_data)
#
# # open group node
# group_node.begin()
# for n in nuke.allNodes():
# # get write node
# if n.Class() in "Write":
# write_node = n
# group_node.end()
#
# # linking knobs to group property panel
# linking_knobs = ["first", "last", "use_limit"]
# for k in linking_knobs:
# lnk = nuke.Link_Knob(k)
# lnk.makeLink(write_node.name(), k)
# lnk.setName(k.replace('_', ' ').capitalize())
# lnk.clearFlag(nuke.STARTLINE)
# group_node.addKnob(lnk)
#
# return
class CreateWritePrerender(plugin.PypeCreator):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
n_class = "write"
family = "prerender"
icon = "sign-out"
defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"]
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
from pype.nuke import lib as pnlib
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`")
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"class": self.n_class,
"families": [self.family],
"avalon": self.data
}
if self.presets.get('fpath_template'):
self.log.info("Adding template path from preset")
write_data.update(
{"fpath_template": self.presets["fpath_template"]}
)
else:
self.log.info("Adding template path from plugin")
write_data.update({
"fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
write_node = pnlib.create_write_node(
self.data["subset"],
write_data,
input=selected_node,
prenodes=[])
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
# open group node
write_node.begin()
for n in nuke.allNodes():
# get write node
if n.Class() in "Write":
w_node = n
write_node.end()
# add inner write node Tab
write_node.addKnob(nuke.Tab_Knob("WriteLinkedKnobs"))
# linking knobs to group property panel
linking_knobs = ["channels", "___", "first", "last", "use_limit"]
for k in linking_knobs:
if "___" in k:
write_node.addKnob(nuke.Text_Knob(''))
else:
lnk = nuke.Link_Knob(k)
lnk.makeLink(w_node.name(), k)
lnk.setName(k.replace('_', ' ').capitalize())
lnk.clearFlag(nuke.STARTLINE)
write_node.addKnob(lnk)
return write_node

View file

@ -0,0 +1,319 @@
from avalon import api, style, io
import nuke
import nukescripts
from pype.nuke import lib as pnlib
from avalon.nuke import lib as anlib
from avalon.nuke import containerise, update_container
reload(pnlib)
class LoadBackdropNodes(api.Loader):
"""Loading Published Backdrop nodes (workfile, nukenodes)"""
representations = ["nk"]
families = ["workfile", "nukenodes"]
label = "Iport Nuke Nodes"
order = 0
icon = "eye"
color = style.colors.light
node_color = "0x7533c1ff"
def load(self, context, name, namespace, data):
"""
Loading function to import a .nk file into the script and
wrap it in a backdrop
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# Get mouse position
n = nuke.createNode("NoOp")
xcursor, ycursor = (n.xpos(), n.ypos())
anlib.reset_selection()
nuke.delete(n)
bdn_frame = 50
with anlib.maintained_selection():
# add group from nk
nuke.nodePaste(file)
# get all pasted nodes
new_nodes = list()
nodes = nuke.selectedNodes()
# get pointer position in DAG
xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame)
# reset position to all nodes and replace inputs and output
for n in nodes:
anlib.reset_selection()
xpos = (n.xpos() - xcursor) + xpointer
ypos = (n.ypos() - ycursor) + ypointer
n.setXYpos(xpos, ypos)
# replace Input nodes for dots
if n.Class() in "Input":
dot = nuke.createNode("Dot")
new_name = n.name().replace("INP", "DOT")
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = n.dependent()
for d in dep:
index = next((i for i, dpcy in enumerate(
d.dependencies())
if n is dpcy), 0)
d.setInput(index, dot)
# remove Input node
anlib.reset_selection()
nuke.delete(n)
continue
# replace Output nodes with dots
elif n.Class() in "Output":
dot = nuke.createNode("Dot")
new_name = n.name() + "_DOT"
dot.setName(new_name)
dot["label"].setValue(new_name)
dot.setXYpos(xpos, ypos)
new_nodes.append(dot)
# rewire
dep = next((d for d in n.dependencies()), None)
if dep:
dot.setInput(0, dep)
# remove Output node
anlib.reset_selection()
nuke.delete(n)
continue
else:
new_nodes.append(n)
# reselect nodes with new Dot instead of Inputs and Output
anlib.reset_selection()
anlib.select_nodes(new_nodes)
# place on backdrop
bdn = nukescripts.autoBackdrop()
# add frame offset
xpos = bdn.xpos() - bdn_frame
ypos = bdn.ypos() - bdn_frame
bdwidth = bdn["bdwidth"].value() + (bdn_frame*2)
bdheight = bdn["bdheight"].value() + (bdn_frame*2)
bdn["xpos"].setValue(xpos)
bdn["ypos"].setValue(ypos)
bdn["bdwidth"].setValue(bdwidth)
bdn["bdheight"].setValue(bdheight)
bdn["name"].setValue(object_name)
bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name))
bdn["note_font_size"].setValue(20)
return containerise(
node=bdn,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs.
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
context = representation["context"]
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with anlib.maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = anlib.get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
anlib.set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd88467ff", 16))
else:
GN["tile_color"].setValue(int(self.node_color, 16))
self.log.info("udated to version: {}".format(version.get("name")))
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
Finds the active viewer, places the node under it,
and adds the group's name to the viewer's Input Process.
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
self.log.error("Please create Viewer node before you "
"run this action again")
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
pnlib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Recursively converts unicode strings to byte strings
throughout a dictionary or list
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
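A usage sketch (Python 2 only, where `unicode` and dict.iteritems() exist):

payload = {u"family": u"workfile", u"tags": [u"review", u"delete"]}
payload = self.byteify(payload)
# -> {'family': 'workfile', 'tags': ['review', 'delete']}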
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -13,8 +13,10 @@ class CollectAssetInfo(pyblish.api.ContextPlugin):
]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.info("asset_data: {}".format(asset_data))
context.data['handles'] = int(asset_data["data"].get("handles", 0))

View file

@ -15,13 +15,16 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
hosts = ["nuke", "nukeassist"]
def process(self, context):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
asset_data = io.find_one({
"type": "asset",
"name": api.Session["AVALON_ASSET"]
})
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []
root = nuke.root()
self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
for node in nuke.allNodes():
@ -30,11 +33,11 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
continue
except Exception as E:
self.log.warning(E)
continue
# get data from avalon knob
self.log.debug("node[name]: {}".format(node['name'].value()))
avalon_knob_data = get_avalon_knob_data(node)
avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])
self.log.debug("avalon_knob_data: {}".format(avalon_knob_data))
@ -84,10 +87,24 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
node.end()
family = avalon_knob_data["family"]
families = [avalon_knob_data["families"]]
families = list()
families_ak = avalon_knob_data.get("families")
if families_ak:
families.append(families_ak)
else:
families.append(family)
# Get format
format = root['format'].value()
resolution_width = format.width()
resolution_height = format.height()
pixel_aspect = format.pixelAspect()
if node.Class() not in "Read":
if node["render"].value():
if "render" not in node.knobs().keys():
pass
elif node["render"].value():
self.log.info("flagged for render")
add_family = "render.local"
# dealing with local/farm rendering
@ -111,7 +128,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
"avalonKnob": avalon_knob_data,
"publish": node.knob('publish').value(),
"step": 1,
"fps": nuke.root()['fps'].value()
"fps": nuke.root()['fps'].value(),
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
})
@ -119,5 +139,4 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
instances.append(instance)
context.data["instances"] = instances
self.log.debug("context: {}".format(context))

View file

@ -0,0 +1,22 @@
import os
import pype.api as pype
import pyblish.api
class CollectScriptVersion(pyblish.api.ContextPlugin):
"""Collect Script Version."""
order = pyblish.api.CollectorOrder
label = "Collect Script Version"
hosts = [
"nuke",
"nukeassist"
]
def process(self, context):
file_path = context.data["currentFile"]
base_name = os.path.basename(file_path)
# get version string
version = pype.get_version_from_path(base_name)
context.data['version'] = version
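pype.get_version_from_path is assumed here to pull a v### token out of the workfile name; a hypothetical re-implementation for illustration only (the real helper may differ):

import re

def get_version_from_path(path):
    # hypothetical sketch, not the actual pype helper
    match = re.search(r"[vV](\d+)", path)
    return match.group(1) if match else None

get_version_from_path("sh010_compositing_v012.nk")  # -> "012"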

View file

@ -0,0 +1,40 @@
import pyblish.api
import nuke
class CollectSlate(pyblish.api.InstancePlugin):
"""Check if SLATE node is in scene and connected to rendering tree"""
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["write"]
def process(self, instance):
node = instance[0]
slate = next((n for n in nuke.allNodes()
if "slate" in n.name().lower()
if not n["disable"].getValue()),
None)
if slate:
# check if slate node is connected to write node tree
slate_check = 0
slate_node = None
while slate_check == 0:
try:
node = node.dependencies()[0]
if slate.name() in node.name():
slate_node = node
slate_check = 1
except IndexError:
break
if slate_node:
instance.data["slateNode"] = slate_node
instance.data["families"].append("slate")
self.log.info(
"Slate node is in node graph: `{}`".format(slate.name()))
self.log.debug(
"__ instance: `{}`".format(instance))

View file

@ -2,8 +2,6 @@ import nuke
import pyblish.api
import os
import pype.api as pype
from avalon.nuke import (
get_avalon_knob_data,
add_publish_knob
@ -11,7 +9,7 @@ from avalon.nuke import (
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Publish current script version."""
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
@ -31,9 +29,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
base_name = os.path.basename(file_path)
subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family)
# get version string
version = pype.get_version_from_path(base_name)
# Get frame range
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
@ -53,7 +48,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"resolutionWidth": resolution_width,

View file

@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Writes"
hosts = ["nuke", "nukeassist"]
families = ["render", "render.local", "render.farm"]
families = ["write"]
def process(self, instance):
@ -50,9 +50,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version
version = pype.get_version_from_path(nuke.root().name())
instance.data['version'] = version
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data('version'))
# create label
@ -76,7 +77,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
}
try:
collected_frames = os.listdir(output_dir)
collected_frames = [f for f in os.listdir(output_dir)
if ext in f]
if collected_frames:
representation['frameStart'] = "%0{}d".format(
len(str(last_frame))) % first_frame
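The padding expression above derives the zero-pad width from the last frame number; with illustrative values:

last_frame, first_frame = 1100, 997  # illustrative values
frame_start = "%0{}d".format(len(str(last_frame))) % first_frame
# frame_start == "0997"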
@ -93,13 +95,14 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(version),
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": [instance.data["family"]] + instance.data["families"],
"families": [instance.data["family"]],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
group_node = [x for x in instance if x.Class() == "Group"][0]
deadlineChunkSize = 1
if "deadlineChunkSize" in group_node.knobs():
@ -109,6 +112,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if "deadlinePriority" in group_node.knobs():
deadlinePriority = group_node["deadlinePriority"].value()
families = [f for f in instance.data["families"] if "write" not in f]
instance.data.update({
"versionData": version_data,
"path": path,
@ -119,6 +123,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"frameStart": first_frame,
"frameEnd": last_frame,
"outputType": output_type,
"family": "write",
"families": families,
"colorspace": node["colorspace"].value(),
"deadlineChunkSize": deadlineChunkSize,
"deadlinePriority": deadlinePriority,

View file

@ -28,6 +28,11 @@ class NukeRenderLocal(pype.api.Extractor):
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("frameStart", None)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame -= 1
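# (the slate occupies one extra frame before the sequence, so rendering
# starts one frame early; the offset is reverted again further below)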
last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
@ -47,6 +52,10 @@ class NukeRenderLocal(pype.api.Extractor):
int(last_frame)
)
# exception for slate workflow
if "slate" in instance.data["families"]:
first_frame += 1
path = node['file'].value()
out_dir = os.path.dirname(path)
ext = node["file_type"].value()

View file

@ -1,187 +0,0 @@
import os
import nuke
import pyblish.api
import pype
class ExtractReviewData(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data"
families = ["review"]
hosts = ["nuke"]
def process(self, instance):
# Store selection
selection = [i for i in nuke.allNodes() if i["selected"].getValue()]
# Deselect all nodes to prevent external connections
[i["selected"].setValue(False) for i in nuke.allNodes()]
self.log.debug("creating staging dir:")
self.staging_dir(instance)
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
if "still" not in instance.data["families"]:
self.render_review_representation(instance,
representation="mov")
self.render_review_representation(instance,
representation="jpeg")
else:
self.render_review_representation(instance, representation="jpeg")
# Restore selection
[i["selected"].setValue(False) for i in nuke.allNodes()]
[i["selected"].setValue(True) for i in selection]
def render_review_representation(self,
instance,
representation="mov"):
assert instance.data['representations'][0]['files'], "Instance data files should't be empty!"
temporary_nodes = []
stagingDir = instance.data[
'representations'][0]["stagingDir"].replace("\\", "/")
self.log.debug("StagingDir `{0}`...".format(stagingDir))
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes)
last_frame = max(collection.indexes)
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
rnode = nuke.createNode("Read")
rnode["file"].setValue(
os.path.join(stagingDir, fname).replace("\\", "/"))
rnode["first"].setValue(first_frame)
rnode["origfirst"].setValue(first_frame)
rnode["last"].setValue(last_frame)
rnode["origlast"].setValue(last_frame)
temporary_nodes.append(rnode)
previous_node = rnode
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
reformat_node = nuke.createNode("Reformat")
ref_node = self.nodes.get("Reformat", None)
if ref_node:
for k, v in ref_node:
self.log.debug("k,v: {0}:{1}".format(k,v))
if isinstance(v, unicode):
v = str(v)
reformat_node[k].setValue(v)
reformat_node.setInput(0, previous_node)
previous_node = reformat_node
temporary_nodes.append(reformat_node)
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
if representation in "mov":
file = fhead + "baked.mov"
name = "baked"
path = os.path.join(stagingDir, file).replace("\\", "/")
self.log.debug("Path: {}".format(path))
instance.data["baked_colorspace_movie"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("mov")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["review", "delete"]
elif representation in "jpeg":
file = fhead + "jpeg"
name = "thumbnail"
path = os.path.join(stagingDir, file).replace("\\", "/")
instance.data["thumbnail"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("jpeg")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["thumbnail"]
# retime to the middle frame for the still/thumbnail
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
repre = {
'name': name,
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug("representations: {}".format(instance.data["representations"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn

View file

@ -0,0 +1,59 @@
import os
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import pype
reload(pnlib)
class ExtractReviewDataLut(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.005
label = "Extract Review Data Lut"
families = ["review"]
hosts = ["nuke"]
def process(self, instance):
families = instance.data["families"]
self.log.info("Creating staging dir...")
if "representations" in instance.data:
staging_dir = instance.data[
"representations"][0]["stagingDir"].replace("\\", "/")
instance.data["stagingDir"] = staging_dir
instance.data["representations"][0]["tags"] = ["review"]
else:
instance.data["representations"] = []
# get output path
render_path = instance.data['path']
staging_dir = os.path.normpath(os.path.dirname(render_path))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
# generate data
with anlib.maintained_selection():
exporter = pnlib.ExporterReviewLut(
self, instance
)
data = exporter.generate_lut()
# assign to representations
instance.data["lutPath"] = os.path.join(
exporter.stagingDir, exporter.file).replace("\\", "/")
instance.data["representations"] += data["representations"]
if "render.farm" in families:
instance.data["families"].remove("review")
instance.data["families"].remove("ftrack")
self.log.debug(
"_ lutPath: {}".format(instance.data["lutPath"]))
self.log.debug(
"_ representations: {}".format(instance.data["representations"]))

View file

@ -0,0 +1,62 @@
import os
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import pype
reload(pnlib)
class ExtractReviewDataMov(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data Mov"
families = ["review", "render", "render.local"]
hosts = ["nuke"]
def process(self, instance):
families = instance.data["families"]
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
# generate data
with anlib.maintained_selection():
exporter = pnlib.ExporterReviewMov(
self, instance)
if "render.farm" in families:
instance.data["families"].remove("review")
instance.data["families"].remove("ftrack")
data = exporter.generate_mov(farm=True)
self.log.debug(
"_ data: {}".format(data))
instance.data.update({
"bakeRenderPath": data.get("bakeRenderPath"),
"bakeScriptPath": data.get("bakeScriptPath"),
"bakeWriteNodeName": data.get("bakeWriteNodeName")
})
else:
data = exporter.generate_mov()
# assign to representations
instance.data["representations"] += data["representations"]
self.log.debug(
"_ representations: {}".format(instance.data["representations"]))

View file

@ -0,0 +1,154 @@
import os
import nuke
from avalon.nuke import lib as anlib
import pyblish.api
import pype
class ExtractSlateFrame(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Slate Frame"
families = ["slate"]
hosts = ["nuke"]
def process(self, instance):
if hasattr(self, "viewer_lut_raw"):
self.viewer_lut_raw = self.viewer_lut_raw
else:
self.viewer_lut_raw = False
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
self.render_slate(instance)
def render_slate(self, instance):
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
temporary_nodes = []
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes) - 1
if "slate" in instance.data["families"]:
first_frame += 1
last_frame = first_frame
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None) - 1
last_frame = first_frame
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
previous_node = node
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
if not self.viewer_lut_raw:
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
file = fhead + "slate.png"
path = os.path.join(staging_dir, file).replace("\\", "/")
instance.data["slateFrame"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("png")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
# fill slate node with comments
self.add_comment_slate_node(instance)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug(
"slate frame path: {}".format(instance.data["slateFrame"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn
def add_comment_slate_node(self, instance):
node = instance.data.get("slateNode")
if not node:
return
comment = instance.context.data.get("comment")
intent = instance.context.data.get("intent")
try:
node["f_submission_note"].setValue(comment)
node["f_submitting_for"].setValue(intent)
except NameError:
return
instance.data.pop("slateNode")

View file

@ -0,0 +1,171 @@
import os
import nuke
from avalon.nuke import lib as anlib
import pyblish.api
import pype
class ExtractThumbnail(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Thumbnail"
families = ["review", "render.farm"]
hosts = ["nuke"]
def process(self, instance):
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
self.render_thumbnail(instance)
def render_thumbnail(self, instance):
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
instance.data["stagingDir"] = staging_dir
self.log.info(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
temporary_nodes = []
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes)
last_frame = max(collection.indexes)
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
path_render = os.path.join(staging_dir, fname).replace("\\", "/")
# check if file exist otherwise connect to write node
if os.path.isfile(path_render):
rnode = nuke.createNode("Read")
rnode["file"].setValue(path_render)
rnode["first"].setValue(first_frame)
rnode["origfirst"].setValue(first_frame)
rnode["last"].setValue(last_frame)
rnode["origlast"].setValue(last_frame)
temporary_nodes.append(rnode)
previous_node = rnode
else:
previous_node = node
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
reformat_node = nuke.createNode("Reformat")
ref_node = self.nodes.get("Reformat", None)
if ref_node:
for k, v in ref_node:
self.log.debug("k, v: {0}:{1}".format(k, v))
if isinstance(v, unicode):
v = str(v)
reformat_node[k].setValue(v)
reformat_node.setInput(0, previous_node)
previous_node = reformat_node
temporary_nodes.append(reformat_node)
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
file = fhead + "jpeg"
name = "thumbnail"
path = os.path.join(staging_dir, file).replace("\\", "/")
instance.data["thumbnail"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("jpeg")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["thumbnail"]
# retime to the middle frame for the thumbnail
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
repre = {
'name': name,
'ext': "jpeg",
'files': file,
"stagingDir": staging_dir,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug(
"representations: {}".format(instance.data["representations"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn

View file

@ -2,8 +2,6 @@ import os
import json
import getpass
import nuke
from avalon import api
from avalon.vendor import requests
import re
@ -27,102 +25,133 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def process(self, instance):
node = None
for x in instance:
if x.Class() == "Write":
node = x
if node is None:
return
node = instance[0]
context = instance.context
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
context = instance.context
workspace = os.path.dirname(context.data["currentFile"])
filepath = None
self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL)
self._comment = context.data.get("comment", "")
self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
self._deadline_user = context.data.get(
"deadlineUser", getpass.getuser())
self._frame_start = int(instance.data["frameStart"])
self._frame_end = int(instance.data["frameEnd"])
# get path
path = nuke.filename(node)
output_dir = instance.data['outputDir']
# get output path
render_path = instance.data['path']
script_path = context.data["currentFile"]
filepath = context.data["currentFile"]
# exception for slate workflow
if "slate" in instance.data["families"]:
self._frame_start -= 1
self.log.debug(filepath)
response = self.payload_submit(instance,
script_path,
render_path,
node.name()
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["publishJobState"] = "Active"
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
dirname = os.path.join(workspace, "renders")
deadline_user = context.data.get("deadlineUser", getpass.getuser())
jobname = "%s - %s" % (filename, instance.name)
ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
if instance.data.get("bakeScriptPath"):
render_path = instance.data.get("bakeRenderPath")
script_path = instance.data.get("bakeScriptPath")
exe_node_name = instance.data.get("bakeWriteNodeName")
# exception for slate workflow
if "slate" in instance.data["families"]:
self._frame_start += 1
resp = self.payload_submit(instance,
script_path,
render_path,
exe_node_name,
response.json()
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = resp.json()
instance.data["publishJobState"] = "Suspended"
def payload_submit(self,
instance,
script_path,
render_path,
exe_node_name,
response_data=None
):
render_dir = os.path.normpath(os.path.dirname(render_path))
script_name = os.path.basename(script_path)
jobname = "%s - %s" % (script_name, instance.name)
if not response_data:
response_data = {}
try:
# Ensure render folder exists
os.makedirs(dirname)
os.makedirs(render_dir)
except OSError:
pass
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
"BatchName": filename,
"BatchName": script_name,
# Job name, as seen in Monitor
"Name": jobname,
# Arbitrary username, for visualisation in Monitor
"UserName": deadline_user,
"UserName": self._deadline_user,
"Priority": instance.data["deadlinePriority"],
"Pool": "2d",
"SecondaryPool": "2d",
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
start=self._frame_start,
end=self._frame_end
),
"ChunkSize": instance.data["deadlineChunkSize"],
"Priority": instance.data["deadlinePriority"],
"Comment": self._comment,
"Comment": comment,
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
# "OutputFilename0": output_filename_0.replace("\\", "/"),
},
"PluginInfo": {
# Input
"SceneFile": filepath,
"SceneFile": script_path,
# Output directory and filename
"OutputFilePath": dirname.replace("\\", "/"),
"OutputFilePath": render_dir.replace("\\", "/"),
# "OutputFilePrefix": render_variables["filename_prefix"],
# Mandatory for Deadline
"Version": ver.group(),
"Version": self._ver.group(),
# Resolve relative references
"ProjectPath": workspace,
"ProjectPath": script_path,
"AWSAssetFile0": render_path,
# Only the specific write node is rendered.
"WriteNode": instance[0].name()
"WriteNode": exe_node_name
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
if responce_data.get("_id"):
payload["JobInfo"].update({
"JobType": "Normal",
"BatchName": responce_data["Props"]["Batch"],
"JobDependency0": responce_data["_id"],
"ChunkSize": 99999999
})
# Include critical environment variables with submission
keys = [
# This will trigger `userSetup.py` on the slave
# such that proper initialisation happens the same
# way as it does on a local machine.
# TODO(marcus): This won't work if the slaves don't
# have access to these paths, such as if slaves are
# running Linux and the submitter is on Windows.
"PYTHONPATH",
"PATH",
"AVALON_SCHEMA",
@ -168,11 +197,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
if key == "PYTHONPATH":
clean_path = clean_path.replace('python2', 'python3')
clean_path = clean_path.replace(
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
clean_environment[key] = clean_path
environment = clean_environment
@ -187,20 +217,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
self.preflight_check(instance)
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
response = requests.post(url, json=payload)
response = requests.post(self.deadline_url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["publishJobState"] = "Active"
return response
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""

View file

@ -28,7 +28,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder + 0.1
families = ["render.no"]
families = ["render"]
label = "Validate rendered frame"
hosts = ["nuke", "nukestudio"]
@ -75,6 +75,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
self.log.info(
'len(collection.indexes): {}'.format(collected_frames_len)
)
if "slate" in instance.data["families"]:
collected_frames_len -= 1
assert (collected_frames_len == frame_length), (
"{} missing frames. Use repair to render all frames"

View file

@ -8,24 +8,31 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
"""Ensure knobs are consistent.
Knobs to validate and their values comes from the
"nuke/knobs.json" preset, which needs this structure:
{
"family": {
"knob_name": knob_value
}
}
Example for presets in config:
"presets/plugins/nuke/publish.json" preset, which needs this structure:
"ValidateNukeWriteKnobs": {
"enabled": true,
"knobs": {
"family": {
"knob_name": knob_value
}
}
}
"""
order = pyblish.api.ValidatorOrder
label = "Knobs"
label = "Validate Write Knobs"
hosts = ["nuke"]
actions = [pype.api.RepairContextAction]
optional = True
def process(self, context):
# Check for preset existence.
if not context.data["presets"]["nuke"].get("knobs"):
if not getattr(self, "knobs"):
return
self.log.debug("__ self.knobs: {}".format(self.knobs))
invalid = self.get_invalid(context, compute=True)
if invalid:
@ -43,7 +50,6 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
@classmethod
def get_invalid_knobs(cls, context):
presets = context.data["presets"]["nuke"]["knobs"]
invalid_knobs = []
for instance in context:
# Filter publisable instances.
@ -53,15 +59,15 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
# Filter families.
families = [instance.data["family"]]
families += instance.data.get("families", [])
families = list(set(families) & set(presets.keys()))
families = list(set(families) & set(cls.knobs.keys()))
if not families:
continue
# Get all knobs to validate.
knobs = {}
for family in families:
for preset in presets[family]:
knobs.update({preset: presets[family][preset]})
for preset in cls.knobs[family]:
knobs.update({preset: cls.knobs[family][preset]})
# Get invalid knobs.
nodes = []

View file

@ -169,32 +169,44 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:

View file

@ -3,6 +3,7 @@ from avalon import io
from pype.action import get_errored_instances_from_context
import pype.api as pype
@pyblish.api.log
class RepairNukestudioVersionUp(pyblish.api.Action):
label = "Version Up Workfile"
@ -53,13 +54,17 @@ class ValidateVersion(pyblish.api.InstancePlugin):
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": subset_name})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": subset_name
})
version_db = io.find_one({
'type': 'version',

View file

@ -77,32 +77,44 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:

View file

@ -170,8 +170,10 @@ def switch(asset_name, filepath=None, new=True):
assert asset, "Could not find '%s' in the database" % asset_name
# Get current project
self._project = io.find_one({"type": "project",
"name": api.Session["AVALON_PROJECT"]})
self._project = io.find_one({
"type": "project",
"name": api.Session["AVALON_PROJECT"]
})
# Go to comp
if not filepath:

View file

@ -39,6 +39,25 @@ def _streams(source):
return json.loads(out)['streams']
def get_fps(str_value):
if str_value == "0/0":
print("Source has \"r_frame_rate\" value set to \"0/0\".")
return "Unknown"
items = str_value.split("/")
if len(items) == 1:
fps = float(items[0])
elif len(items) == 2:
fps = float(items[0]) / float(items[1])
else:
# Unexpected format (more than one "/"); treat as unknown.
return "Unknown"
# Check if fps is integer or float number
if int(fps) == fps:
fps = int(fps)
return str(fps)
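# Example behaviour of get_fps() (illustrative inputs):
#   get_fps("24/1")       -> "24"
#   get_fps("30000/1001") -> "29.97002997002997"
#   get_fps("25")         -> "25"
#   get_fps("0/0")        -> "Unknown"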
class ModifiedBurnins(ffmpeg_burnins.Burnins):
'''
This is modification of OTIO FFmpeg Burnin adapter.
@ -95,6 +114,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
streams = _streams(source)
super().__init__(source, streams)
if options_init:
self.options_init.update(options_init)
@ -139,12 +159,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
options['frame_offset'] = start_frame
expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
_text = str(int(self.end_frame + options['frame_offset']))
if text and isinstance(text, str):
text = r"{}".format(text)
expr = text.replace("{current_frame}", expr)
text = text.replace("{current_frame}", _text)
options['expression'] = expr
text = str(int(self.end_frame + options['frame_offset']))
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
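# Illustration: a preset text of "Frame {current_frame}" is split into
#   options['expression'] -> "Frame %{eif\:n+<offset>\:d}"  (live ffmpeg counter)
#   text                  -> "Frame <last frame number>"    (static, used for sizing)
# where <offset> is the options['frame_offset'] resolved above.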
def add_timecode(self, align, options=None, start_frame=None):
@ -328,6 +349,17 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
frame_start = data.get("frame_start")
frame_start_tc = data.get('frame_start_tc', frame_start)
stream = burnin._streams[0]
if "resolution_width" not in data:
data["resolution_width"] = stream.get("width", "Unknown")
if "resolution_height" not in data:
data["resolution_height"] = stream.get("height", "Unknown")
if "fps" not in data:
data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
for align_text, preset in presets.get('burnins', {}).items():
align = None
if align_text == 'TOP_LEFT':
@ -382,12 +414,14 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
elif bi_func == 'timecode':
burnin.add_timecode(align, start_frame=frame_start_tc)
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
elif bi_func == "datetime":
date_format = preset["format"]
burnin.add_datetime(date_format, align)
@ -414,4 +448,4 @@ if __name__ == '__main__':
data['codec'],
data['output'],
data['burnin_data']
)
)
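
A hedged sketch of the payload this entry point consumes, based only on the keys visible in this diff (the "input" key name and the codec contents are assumptions):

payload = {
    "input": "/path/to/input.mov",
    "codec": {},                 # codec arguments passed through to ffmpeg
    "output": "/path/to/output_burnin.mov",
    "burnin_data": {
        "frame_start": 1001,
        "frame_start_tc": 1001,  # falls back to frame_start when absent
        # resolution_width, resolution_height and fps may be omitted:
        # the code above now fills them from the first ffprobe stream.
    },
}
burnins_from_data(
    payload["input"],
    payload["codec"],
    payload["output"],
    payload["burnin_data"],
)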

View file

@ -4,7 +4,16 @@ import os
import logging
import subprocess
import platform
from shutil import which
try:
from shutil import which
except ImportError:
# we are in python < 3.3
def which(command):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, command)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
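# No match found: fall through to an implicit None, matching
# shutil.which() behaviour for a missing executable. (This simple
# fallback does not consult PATHEXT on Windows.)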
handler = logging.basicConfig()
log = logging.getLogger("Publish Image Sequences")

View file

@ -462,8 +462,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
# Check whether the conversion can be done by the Loader.
# They *must* use the same asset, subset and Loader for
# `api.update` to make sense.
old = io.find_one({"_id": io.ObjectId(representation_current)})
new = io.find_one({"_id": io.ObjectId(representation_new)})
old = io.find_one({
"_id": io.ObjectId(representation_current)
})
new = io.find_one({
"_id": io.ObjectId(representation_new)
})
is_valid = compare_representations(old=old, new=new)
if not is_valid:
log.error("Skipping: %s. See log for details.",

View file

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<!-- Generator: Adobe Illustrator 19.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 56 56" style="enable-background:new 0 0 56 56;" xml:space="preserve">
<g>
<path d="M28,0C12.561,0,0,12.561,0,28s12.561,28,28,28s28-12.561,28-28S43.439,0,28,0z M28,54C13.663,54,2,42.336,2,28
S13.663,2,28,2s26,11.664,26,26S42.337,54,28,54z"/>
<path d="M40,16H16c-0.553,0-1,0.448-1,1s0.447,1,1,1h24c0.553,0,1-0.448,1-1S40.553,16,40,16z"/>
<path d="M40,27H16c-0.553,0-1,0.448-1,1s0.447,1,1,1h24c0.553,0,1-0.448,1-1S40.553,27,40,27z"/>
<path d="M40,38H16c-0.553,0-1,0.448-1,1s0.447,1,1,1h24c0.553,0,1-0.448,1-1S40.553,38,40,38z"/>
</g>
</svg>

View file

@ -1,23 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="482.428px" height="482.429px" viewBox="0 0 482.428 482.429" fill="#ffffff" style="enable-background:new 0 0 482.428 482.429;"
xml:space="preserve">
<g>
<path d="M381.163,57.799h-75.094C302.323,25.316,274.686,0,241.214,0c-33.471,0-61.104,25.315-64.85,57.799h-75.098
c-30.39,0-55.111,24.728-55.111,55.117v2.828c0,23.223,14.46,43.1,34.83,51.199v260.369c0,30.39,24.724,55.117,55.112,55.117
h210.236c30.389,0,55.111-24.729,55.111-55.117V166.944c20.369-8.1,34.83-27.977,34.83-51.199v-2.828
C436.274,82.527,411.551,57.799,381.163,57.799z M241.214,26.139c19.037,0,34.927,13.645,38.443,31.66h-76.879
C206.293,39.783,222.184,26.139,241.214,26.139z M375.305,427.312c0,15.978-13,28.979-28.973,28.979H136.096
c-15.973,0-28.973-13.002-28.973-28.979V170.861h268.182V427.312z M410.135,115.744c0,15.978-13,28.979-28.973,28.979H101.266
c-15.973,0-28.973-13.001-28.973-28.979v-2.828c0-15.978,13-28.979,28.973-28.979h279.897c15.973,0,28.973,13.001,28.973,28.979
V115.744z"/>
<path d="M171.144,422.863c7.218,0,13.069-5.853,13.069-13.068V262.641c0-7.216-5.852-13.07-13.069-13.07
c-7.217,0-13.069,5.854-13.069,13.07v147.154C158.074,417.012,163.926,422.863,171.144,422.863z"/>
<path d="M241.214,422.863c7.218,0,13.07-5.853,13.07-13.068V262.641c0-7.216-5.854-13.07-13.07-13.07
c-7.217,0-13.069,5.854-13.069,13.07v147.154C228.145,417.012,233.996,422.863,241.214,422.863z"/>
<path d="M311.284,422.863c7.217,0,13.068-5.853,13.068-13.068V262.641c0-7.216-5.852-13.07-13.068-13.07
c-7.219,0-13.07,5.854-13.07,13.07v147.154C298.213,417.012,304.067,422.863,311.284,422.863z"/>
</g>
</svg>

View file

@ -1,15 +1,10 @@
import os
from . import QtCore, QtGui, QtWidgets
from . import SvgButton
from . import get_resource
from pypeapp import style
class ComponentItem(QtWidgets.QFrame):
C_NORMAL = '#777777'
C_HOVER = '#ffffff'
C_ACTIVE = '#4BB543'
C_ACTIVE_HOVER = '#4BF543'
signal_remove = QtCore.Signal(object)
signal_thumbnail = QtCore.Signal(object)
@ -58,10 +53,8 @@ class ComponentItem(QtWidgets.QFrame):
self.icon.setText("")
self.icon.setScaledContents(True)
self.btn_action_menu = SvgButton(
get_resource('menu.svg'), 22, 22,
[self.C_NORMAL, self.C_HOVER],
frame_image_info, False
self.btn_action_menu = PngButton(
name="menu", size=QtCore.QSize(22, 22)
)
self.action_menu = QtWidgets.QMenu()
@ -88,7 +81,9 @@ class ComponentItem(QtWidgets.QFrame):
self.file_info.setStyleSheet('padding-left:3px;')
expanding_sizePolicy.setHeightForWidth(self.name.sizePolicy().hasHeightForWidth())
expanding_sizePolicy.setHeightForWidth(
self.name.sizePolicy().hasHeightForWidth()
)
frame_name_repre = QtWidgets.QFrame(frame)
@ -104,7 +99,8 @@ class ComponentItem(QtWidgets.QFrame):
layout.addWidget(self.ext, alignment=QtCore.Qt.AlignRight)
frame_name_repre.setSizePolicy(
QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding
QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.MinimumExpanding
)
# Repre + icons
@ -156,12 +152,7 @@ class ComponentItem(QtWidgets.QFrame):
layout_main.addWidget(frame_middle)
self.remove = SvgButton(
get_resource('trash.svg'), 22, 22,
[self.C_NORMAL, self.C_HOVER],
frame, False
)
self.remove = PngButton(name="trash", size=QtCore.QSize(22, 22))
layout_main.addWidget(self.remove)
layout = QtWidgets.QVBoxLayout(self)
@ -308,14 +299,15 @@ class ComponentItem(QtWidgets.QFrame):
class LightingButton(QtWidgets.QPushButton):
lightingbtnstyle = """
QPushButton {
font: %(font_size_pt)spt;
text-align: center;
color: #777777;
background-color: transparent;
border-width: 1px;
border-color: #777777;
border-style: solid;
padding-top: 2px;
padding-bottom: 2px;
padding-top: 0px;
padding-bottom: 0px;
padding-left: 3px;
padding-right: 3px;
border-radius: 3px;
@ -351,14 +343,180 @@ class LightingButton(QtWidgets.QPushButton):
color: #4BF543;
}
"""
def __init__(self, text, *args, **kwargs):
super().__init__(text, *args, **kwargs)
self.setStyleSheet(self.lightingbtnstyle)
def __init__(self, text, font_size_pt=8, *args, **kwargs):
super(LightingButton, self).__init__(text, *args, **kwargs)
self.setStyleSheet(self.lightingbtnstyle % {
"font_size_pt": font_size_pt
})
self.setCheckable(True)
preview_font_metrics = self.fontMetrics().boundingRect(text)
width = preview_font_metrics.width() + 16
height = preview_font_metrics.height() + 5
self.setMaximumWidth(width)
self.setMaximumHeight(height)
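# Usage sketch (illustrative): the button is checkable and sizes
# itself to its label plus a small margin, so a larger font_size_pt
# yields a proportionally larger button.
#   btn = LightingButton("RGBA", font_size_pt=10)
#   btn.setChecked(True)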
class PngFactory:
png_names = {
"trash": {
"normal": QtGui.QIcon(get_resource("trash.png")),
"hover": QtGui.QIcon(get_resource("trash_hover.png")),
"pressed": QtGui.QIcon(get_resource("trash_pressed.png")),
"pressed_hover": QtGui.QIcon(
get_resource("trash_pressed_hover.png")
),
"disabled": QtGui.QIcon(get_resource("trash_disabled.png"))
},
"menu": {
"normal": QtGui.QIcon(get_resource("menu.png")),
"hover": QtGui.QIcon(get_resource("menu_hover.png")),
"pressed": QtGui.QIcon(get_resource("menu_pressed.png")),
"pressed_hover": QtGui.QIcon(
get_resource("menu_pressed_hover.png")
),
"disabled": QtGui.QIcon(get_resource("menu_disabled.png"))
}
}
class PngButton(QtWidgets.QPushButton):
png_button_style = """
QPushButton {
border: none;
background-color: transparent;
padding-top: 0px;
padding-bottom: 0px;
padding-left: 0px;
padding-right: 0px;
}
QPushButton:hover {}
QPushButton:pressed {}
QPushButton:disabled {}
QPushButton:checked {}
QPushButton:checked:hover {}
QPushButton:checked:pressed {}
"""
def __init__(
self, name=None, path=None, hover_path=None, pressed_path=None,
hover_pressed_path=None, disabled_path=None,
size=None, *args, **kwargs
):
self._hovered = False
self._pressed = False
super(PngButton, self).__init__(*args, **kwargs)
self.setStyleSheet(self.png_button_style)
png_dict = {}
if name:
png_dict = PngFactory.png_names.get(name) or {}
if not png_dict:
print((
"WARNING: There is not set icon with name \"{}\""
"in PngFactory!"
).format(name))
ico_normal = png_dict.get("normal")
ico_hover = png_dict.get("hover")
ico_pressed = png_dict.get("pressed")
ico_hover_pressed = png_dict.get("pressed_hover")
ico_disabled = png_dict.get("disabled")
if path:
ico_normal = QtGui.QIcon(path)
if hover_path:
ico_hover = QtGui.QIcon(hover_path)
if pressed_path:
ico_pressed = QtGui.QIcon(pressed_path)
if hover_pressed_path:
ico_hover_pressed = QtGui.QIcon(hover_pressed_path)
if disabled_path:
ico_disabled = QtGui.QIcon(disabled_path)
self.setIcon(ico_normal)
if size:
self.setIconSize(size)
self.setMaximumSize(size)
self.ico_normal = ico_normal
self.ico_hover = ico_hover
self.ico_pressed = ico_pressed
self.ico_hover_pressed = ico_hover_pressed
self.ico_disabled = ico_disabled
def setDisabled(self, in_bool):
super(PngButton, self).setDisabled(in_bool)
icon = self.ico_normal
if in_bool and self.ico_disabled:
icon = self.ico_disabled
self.setIcon(icon)
def enterEvent(self, event):
self._hovered = True
if not self.isEnabled():
return
icon = self.ico_normal
if self.ico_hover:
icon = self.ico_hover
if self._pressed and self.ico_hover_pressed:
icon = self.ico_hover_pressed
if self.icon() != icon:
self.setIcon(icon)
def mouseMoveEvent(self, event):
super(PngButton, self).mouseMoveEvent(event)
if self._pressed:
mouse_pos = event.pos()
hovering = self.rect().contains(mouse_pos)
if hovering and not self._hovered:
self.enterEvent(event)
elif not hovering and self._hovered:
self.leaveEvent(event)
def leaveEvent(self, event):
self._hovered = False
if not self.isEnabled():
return
icon = self.ico_normal
if self._pressed and self.ico_pressed:
icon = self.ico_pressed
if self.icon() != icon:
self.setIcon(icon)
def mousePressEvent(self, event):
self._pressed = True
if not self.isEnabled():
return
icon = self.ico_hover
if self.ico_pressed:
icon = self.ico_pressed
if self.ico_hover_pressed:
mouse_pos = event.pos()
if self.rect().contains(mouse_pos):
icon = self.ico_hover_pressed
if icon is None:
icon = self.ico_normal
if self.icon() != icon:
self.setIcon(icon)
def mouseReleaseEvent(self, event):
if not self.isEnabled():
return
if self._pressed:
self._pressed = False
mouse_pos = event.pos()
if self.rect().contains(mouse_pos):
self.clicked.emit()
icon = self.ico_normal
if self._hovered and self.ico_hover:
icon = self.ico_hover
if self.icon() != icon:
self.setIcon(icon)
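
A short usage sketch for the two PngButton construction paths; "trash" is registered in PngFactory above, while the explicit paths are hypothetical:

# By factory name:
remove_btn = PngButton(name="trash", size=QtCore.QSize(22, 22))

# By explicit icon paths; any state icon left out simply keeps the
# "normal" icon for that state:
custom_btn = PngButton(
    path="icons/play.png",
    hover_path="icons/play_hover.png",
    pressed_path="icons/play_pressed.png",
    size=QtCore.QSize(22, 22),
)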

BIN
res/app_icons/blender.png Normal file

Binary file not shown.

View file

@ -0,0 +1,34 @@
<?xml version="1.0" ?>
<svg id="Icons" version="1.1" viewBox="0 0 512 512" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g>
<linearGradient gradientUnits="userSpaceOnUse" id="SVGID_1_" x1="-0.0000027" x2="512" y1="256" y2="256">
<stop offset="0" style="stop-color:#00b300"/>
<stop offset="1" style="stop-color:#006600"/>
</linearGradient>
<circle cx="256" cy="256" fill="url(#SVGID_1_)" r="256"/>
<linearGradient gradientUnits="userSpaceOnUse" id="SVGID_2_" x1="42.6666641" x2="469.3333435" y1="256.0005188" y2="256.0005188">
<stop offset="0" style="stop-color:#006600"/>
<stop offset="1" style="stop-color:#00b300"/>
</linearGradient>
<path d="M256,469.3338623c-117.6314697,0-213.3333435-95.7023926-213.3333435-213.3333435 c0-117.6314545,95.7018661-213.333313,213.3333435-213.333313c117.6357422,0,213.3333435,95.7018661,213.3333435,213.333313 C469.3333435,373.6314697,373.6357422,469.3338623,256,469.3338623z" fill="url(#SVGID_2_)"/>
</g>
<g transform="
translate(120, 120)
scale(9)
">
<style type="text/css">
.st0{fill:none;stroke:#000000;stroke-width:2;stroke-miterlimit:10;}
.st1{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;}
.st2{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;}
.st3{fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-miterlimit:10;}
.st4{fill:none;stroke:#000000;stroke-width:2;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:3;}
</style>
<polyline class="st1" points="2,8 19,8 19,23 13,23 "/>
<circle class="st1" cx="24" cy="23" r="2"/>
<circle class="st1" cx="8" cy="23" r="2"/>
<polyline class="st1" points="19,23 19,12 25,12 29,17 29,23 26,23 "/>
<line class="st1" x1="4" x2="13" y1="12" y2="12"/>
<line class="st1" x1="2" x2="11" y1="16" y2="16"/>
</g>
</svg>

3
setup/blender/init.py Normal file
View file

@ -0,0 +1,3 @@
from pype import blender
blender.install()

View file

@ -0,0 +1 @@
import knob_scripter

Some files were not shown because too many files have changed in this diff