Merge remote-tracking branch 'origin/release/2.9.0' into feature/102-_draft_Celaction_quick_integration

# Conflicts:
#	pype/plugins/global/publish/submit_publish_job.py
Jakub Jezek 2020-06-03 09:33:29 +01:00
commit 18e864cc60
74 changed files with 4402 additions and 1915 deletions

View file

@@ -15,14 +15,11 @@
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys
import os
from pprint import pprint
from pypeapp.pypeLauncher import PypeLauncher
from pypeapp.storage import Storage
from pypeapp.deployment import Deployment
pype_setup = os.getenv('PYPE_ROOT')
pype_setup = os.getenv('PYPE_SETUP_PATH')
d = Deployment(pype_setup)
launcher = PypeLauncher()
@@ -32,7 +29,6 @@ os.environ['PYPE_CONFIG'] = config_path
os.environ['TOOL_ENV'] = os.path.normpath(os.path.join(config_path,
'environments'))
launcher._add_modules()
Storage().update_environment()
launcher._load_default_environments(tools=tools)
# -- Project information -----------------------------------------------------

View file

@@ -25,15 +25,15 @@ set PYTHONPATH=%%d;!PYTHONPATH!
echo ^>^>^> Setting PYPE_CONFIG
call :ResolvePath pypeconfig "..\pype-config"
set PYPE_CONFIG=%pypeconfig%
echo ^>^>^> Setting PYPE_ROOT
echo ^>^>^> Setting PYPE_SETUP_PATH
call :ResolvePath pyperoot "..\..\"
set PYPE_ROOT=%pyperoot%
set PYTHONPATH=%PYPE_ROOT%;%PYTHONPATH%
set PYPE_SETUP_PATH=%pyperoot%
set PYTHONPATH=%PYPE_SETUP_PATH%;%PYTHONPATH%
echo ^>^>^> Setting PYPE_ENV
set PYPE_ENV="C:\Users\Public\pype_env2"
call "docs\make.bat" clean
sphinx-apidoc -M -f -d 6 --ext-autodoc --ext-intersphinx --ext-viewcode -o docs\source pype %PYPE_ROOT%\repos\pype\pype\vendor\*
sphinx-apidoc -M -f -d 6 --ext-autodoc --ext-intersphinx --ext-viewcode -o docs\source pype %PYPE_SETUP_PATH%\repos\pype\pype\vendor\*
call "docs\make.bat" html
echo ^>^>^> Doing cleanup ...
set PYTHONPATH=%_OLD_PYTHONPATH%

View file

@@ -3,7 +3,7 @@ import os
from pyblish import api as pyblish
from avalon import api as avalon
from .lib import filter_pyblish_plugins
from pypeapp import config
from pypeapp import config, Anatomy
import logging
@@ -99,6 +99,10 @@ def install():
avalon.register_plugin_path(avalon.Creator, path)
avalon.register_plugin_path(avalon.InventoryAction, path)
if project_name:
anatomy = Anatomy(project_name)
anatomy.set_root_environments()
avalon.register_root(anatomy.roots)
# apply monkey patched discover to original one
avalon.discover = patched_discover

View file

@@ -45,10 +45,9 @@ class AvalonApps:
def show_launcher(self):
# if app_launcher doesn't exist create it, otherwise only show main window
if self.app_launcher is None:
root = os.path.realpath(os.environ["AVALON_PROJECTS"])
io.install()
APP_PATH = launcher_lib.resource("qml", "main.qml")
self.app_launcher = launcher_widget.Launcher(root, APP_PATH)
self.app_launcher = launcher_widget.Launcher(APP_PATH)
self.app_launcher.window.show()
def show_library_loader(self):

View file

@@ -70,24 +70,6 @@ class AvalonRestApi(RestApi):
_asset, identificator, _project_name
))
@RestApi.route("/publish/<asset_name>",
url_prefix="/premiere", methods="GET")
def publish(self, request):
"""
http://localhost:8021/premiere/publish/shot021?json_in=this/path/file_in.json&json_out=this/path/file_out.json
"""
asset_name = request.url_data["asset_name"]
query = request.query
data = request.request_data
output = {
"message": "Got your data. Thanks.",
"your_data": data,
"your_query": query,
"your_asset_is": asset_name
}
return CallbackResult(data=self.result_to_json(output))
def result_to_json(self, result):
""" Converts result of MongoDB query to dict without $oid (ObjectId)
keys with help of regex matching.

View file

@@ -26,7 +26,7 @@ class ClockifySettings(QtWidgets.QWidget):
elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'):
self.setWindowIcon(self.parent.parent.icon)
else:
pype_setup = os.getenv('PYPE_ROOT')
pype_setup = os.getenv('PYPE_SETUP_PATH')
items = [pype_setup, "app", "resources", "icon.png"]
fname = os.path.sep.join(items)
icon = QtGui.QIcon(fname)

View file

@@ -0,0 +1,107 @@
import os
import collections
import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import get_avalon_attr
class CleanHierarchicalAttrsAction(BaseAction):
identifier = "clean.hierarchical.attr"
label = "Pype Admin"
variant = "- Clean hierarchical custom attributes"
description = "Unset empty hierarchical attribute values."
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
all_project_entities_query = (
"select id, name, parent_id, link"
" from TypedContext where project_id is \"{}\""
)
cust_attr_query = (
"select value, entity_id from CustomAttributeValue "
"where entity_id in ({}) and configuration_id is \"{}\""
)
def discover(self, session, entities, event):
"""Show only on project entity."""
if len(entities) == 1 and entities[0].entity_type.lower() == "project":
return True
return False
def launch(self, session, entities, event):
project = entities[0]
user_message = "This may take some time"
self.show_message(event, user_message, result=True)
self.log.debug("Preparing entities for cleanup.")
all_entities = session.query(
self.all_project_entities_query.format(project["id"])
).all()
all_entities_ids = [
"\"{}\"".format(entity["id"])
for entity in all_entities
if entity.entity_type.lower() != "task"
]
self.log.debug(
"Collected {} entities to process.".format(len(all_entities_ids))
)
entity_ids_joined = ", ".join(all_entities_ids)
attrs, hier_attrs = get_avalon_attr(session)
for attr in hier_attrs:
configuration_key = attr["key"]
self.log.debug(
"Looking for cleanup of custom attribute \"{}\"".format(
configuration_key
)
)
configuration_id = attr["id"]
call_expr = [{
"action": "query",
"expression": self.cust_attr_query.format(
entity_ids_joined, configuration_id
)
}]
[values] = self.session.call(call_expr)
data = {}
for item in values["data"]:
value = item["value"]
if value is None:
data[item["entity_id"]] = value
if not data:
self.log.debug(
"Nothing to clean for \"{}\".".format(configuration_key)
)
continue
self.log.debug("Cleaning up {} values for \"{}\".".format(
len(data), configuration_key
))
for entity_id, value in data.items():
entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"entity_id": entity_id
})
session.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
"CustomAttributeValue",
entity_key
)
)
session.commit()
return True
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
CleanHierarchicalAttrsAction(session, plugins_presets).register()

View file

@@ -1,14 +1,11 @@
import os
import sys
import argparse
import collections
import json
import arrow
import logging
import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.avalon_sync import CustAttrIdKey
from pypeapp import config
from ftrack_api.exception import NoResultFoundError
"""
This action creates/updates custom attributes.
@@ -135,11 +132,6 @@ class CustomAttributes(BaseAction):
return True
def launch(self, session, entities, event):
self.types = {}
self.object_type_ids = {}
self.groups = {}
self.security_roles = {}
# JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
@@ -153,7 +145,8 @@
})
session.commit()
try:
self.avalon_mongo_id_attributes(session)
self.prepare_global_data(session)
self.avalon_mongo_id_attributes(session, event)
self.custom_attributes_from_file(session, event)
job['status'] = 'done'
@@ -170,60 +163,180 @@
return True
def avalon_mongo_id_attributes(self, session):
def prepare_global_data(self, session):
self.types_per_name = {
attr_type["name"].lower(): attr_type
for attr_type in session.query("CustomAttributeType").all()
}
self.security_roles = {
role["name"].lower(): role
for role in session.query("SecurityRole").all()
}
object_types = session.query("ObjectType").all()
self.object_types_per_id = {
object_type["id"]: object_type for object_type in object_types
}
self.object_types_per_name = {
object_type["name"].lower(): object_type
for object_type in object_types
}
self.groups = {}
def avalon_mongo_id_attributes(self, session, event):
hierarchical_attr, object_type_attrs = (
self.mongo_id_custom_attributes(session)
)
if hierarchical_attr is None:
self.create_hierarchical_mongo_attr(session)
hierarchical_attr, object_type_attrs = (
self.mongo_id_custom_attributes(session)
)
if hierarchical_attr is None:
return
if object_type_attrs:
self.convert_mongo_id_to_hierarchical(
hierarchical_attr, object_type_attrs, session, event
)
def mongo_id_custom_attributes(self, session):
cust_attrs_query = (
"select id, entity_type, object_type_id, is_hierarchical, default"
" from CustomAttributeConfiguration"
" where key = \"{}\""
).format(CustAttrIdKey)
mongo_id_avalon_attr = session.query(cust_attrs_query).all()
hierarchical_attr = None
object_type_attrs = []
for cust_attr in mongo_id_avalon_attr:
if cust_attr["is_hierarchical"]:
hierarchical_attr = cust_attr
else:
object_type_attrs.append(cust_attr)
return hierarchical_attr, object_type_attrs
def create_hierarchical_mongo_attr(self, session):
# Attribute Name and Label
cust_attr_label = 'Avalon/Mongo Id'
# Types that don't need object_type_id
base = {'show'}
# Don't create custom attribute on these entity types:
exceptions = ['task', 'milestone']
exceptions.extend(base)
# Get all possible object types
all_obj_types = session.query('ObjectType').all()
# Filter object types by exceptions
filtered_types_id = set()
for obj_type in all_obj_types:
name = obj_type['name']
if " " in name:
name = name.replace(' ', '')
if obj_type['name'] not in self.object_type_ids:
self.object_type_ids[name] = obj_type['id']
if name.lower() not in exceptions:
filtered_types_id.add(obj_type['id'])
cust_attr_label = "Avalon/Mongo ID"
# Set security roles for attribute
role_list = ['API', 'Administrator']
roles = self.get_security_role(role_list)
role_list = ("API", "Administrator", "Pypeclub")
roles = self.get_security_roles(role_list)
# Set Text type of Attribute
custom_attribute_type = self.get_type('text')
custom_attribute_type = self.types_per_name["text"]
# Set group to 'avalon'
group = self.get_group('avalon')
group = self.get_group("avalon")
data = {}
data['key'] = CustAttrIdKey
data['label'] = cust_attr_label
data['type'] = custom_attribute_type
data['default'] = ''
data['write_security_roles'] = roles
data['read_security_roles'] = roles
data['group'] = group
data['config'] = json.dumps({'markdown': False})
data = {
"key": CustAttrIdKey,
"label": cust_attr_label,
"type": custom_attribute_type,
"default": "",
"write_security_roles": roles,
"read_security_roles": roles,
"group": group,
"is_hierarchical": True,
"entity_type": "show",
"config": json.dumps({"markdown": False})
}
for entity_type in base:
data['entity_type'] = entity_type
self.process_attribute(data)
self.process_attribute(data)
data['entity_type'] = 'task'
for object_type_id in filtered_types_id:
data['object_type_id'] = str(object_type_id)
self.process_attribute(data)
def convert_mongo_id_to_hierarchical(
self, hierarchical_attr, object_type_attrs, session, event
):
user_msg = "Converting old custom attributes. This may take some time."
self.show_message(event, user_msg, True)
self.log.info(user_msg)
object_types_per_id = {
object_type["id"]: object_type
for object_type in session.query("ObjectType").all()
}
cust_attr_query = (
"select value, entity_id from ContextCustomAttributeValue "
"where configuration_id is {}"
)
for attr_def in object_type_attrs:
attr_ent_type = attr_def["entity_type"]
if attr_ent_type == "show":
entity_type_label = "Project"
elif attr_ent_type == "task":
entity_type_label = (
object_types_per_id[attr_def["object_type_id"]]["name"]
)
else:
self.log.warning(
"Unsupported entity type: \"{}\". Skipping.".format(
attr_ent_type
)
)
continue
self.log.debug((
"Converting Avalon MongoID attr for Entity type \"{}\"."
).format(entity_type_label))
call_expr = [{
"action": "query",
"expression": cust_attr_query.format(attr_def["id"])
}]
if hasattr(session, "call"):
[values] = session.call(call_expr)
else:
[values] = session._call(call_expr)
for value in values["data"]:
table_values = collections.OrderedDict({
"configuration_id": hierarchical_attr["id"],
"entity_id": value["entity_id"]
})
session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
table_values,
"value",
ftrack_api.symbol.NOT_SET,
value["value"]
)
)
try:
session.commit()
except Exception:
session.rollback()
self.log.warning(
(
"Couldn't transfer Avalon Mongo ID"
" attribute for entity type \"{}\"."
).format(entity_type_label),
exc_info=True
)
try:
session.delete(attr_def)
session.commit()
except Exception:
session.rollback()
self.log.warning(
(
"Couldn't delete Avalon Mongo ID"
" attribute for entity type \"{}\"."
).format(entity_type_label),
exc_info=True
)
def custom_attributes_from_file(self, session, event):
presets = config.get_presets()['ftrack']['ftrack_custom_attributes']
@@ -317,11 +430,11 @@ class CustomAttributes(BaseAction):
'Type {} is not valid'.format(attr['type'])
)
type_name = attr['type'].lower()
output['key'] = attr['key']
output['label'] = attr['label']
output['type'] = self.get_type(type_name)
type_name = attr['type'].lower()
output['type'] = self.types_per_name[type_name]
config = None
if type_name == 'number':
@@ -382,15 +495,15 @@ class CustomAttributes(BaseAction):
config = json.dumps({
'multiSelect': multiSelect,
'data': json.dumps(data)
})
})
return config
def get_group(self, attr):
if isinstance(attr, str):
group_name = attr
else:
if isinstance(attr, dict):
group_name = attr['group'].lower()
else:
group_name = attr
if group_name in self.groups:
return self.groups[group_name]
@@ -416,48 +529,30 @@
'Found more than one group "{}"'.format(group_name)
)
def get_role_ALL(self):
role_name = 'ALL'
if role_name in self.security_roles:
all_roles = self.security_roles[role_name]
else:
all_roles = self.session.query('SecurityRole').all()
self.security_roles[role_name] = all_roles
for role in all_roles:
if role['name'] not in self.security_roles:
self.security_roles[role['name']] = role
return all_roles
def get_security_roles(self, security_roles):
security_roles_lowered = tuple(name.lower() for name in security_roles)
if (
len(security_roles_lowered) == 0
or "all" in security_roles_lowered
):
return list(self.security_roles.values())
output = []
if security_roles_lowered[0] == "except":
excepts = security_roles_lowered[1:]
for role_name, role in self.security_roles.items():
if role_name not in excepts:
output.append(role)
def get_security_role(self, security_roles):
roles = []
security_roles_lowered = [role.lower() for role in security_roles]
if len(security_roles) == 0 or 'all' in security_roles_lowered:
roles = self.get_role_ALL()
elif security_roles_lowered[0] == 'except':
excepts = security_roles[1:]
all = self.get_role_ALL()
for role in all:
if role['name'] not in excepts:
roles.append(role)
if role['name'] not in self.security_roles:
self.security_roles[role['name']] = role
else:
for role_name in security_roles:
for role_name in security_roles_lowered:
if role_name in self.security_roles:
roles.append(self.security_roles[role_name])
continue
try:
query = 'SecurityRole where name is "{}"'.format(role_name)
role = self.session.query(query).one()
self.security_roles[role_name] = role
roles.append(role)
except NoResultFoundError:
output.append(self.security_roles[role_name])
else:
raise CustAttrException((
'Security role "{}" does not exist'
"Security role \"{}\" was not found in Ftrack."
).format(role_name))
return roles
return output
def get_default(self, attr):
type = attr['type']
@@ -512,32 +607,17 @@
roles_read = attr['read_security_roles']
if 'read_security_roles' in output:
roles_write = attr['write_security_roles']
output['read_security_roles'] = self.get_security_role(roles_read)
output['write_security_roles'] = self.get_security_role(roles_write)
output['read_security_roles'] = self.get_security_roles(roles_read)
output['write_security_roles'] = self.get_security_roles(roles_write)
return output
def get_type(self, type_name):
if type_name in self.types:
return self.types[type_name]
query = 'CustomAttributeType where name is "{}"'.format(type_name)
type = self.session.query(query).one()
self.types[type_name] = type
return type
def get_entity_type(self, attr):
if 'is_hierarchical' in attr:
if attr['is_hierarchical'] is True:
type = 'show'
if 'entity_type' in attr:
type = attr['entity_type']
return {
'is_hierarchical': True,
'entity_type': type
}
if attr.get("is_hierarchical", False):
return {
"is_hierarchical": True,
"entity_type": attr.get("entity_type") or "show"
}
if 'entity_type' not in attr:
raise CustAttrException('Missing entity_type')
@@ -549,23 +629,16 @@
raise CustAttrException('Missing object_type')
object_type_name = attr['object_type']
if object_type_name not in self.object_type_ids:
try:
query = 'ObjectType where name is "{}"'.format(
object_type_name
)
object_type_id = self.session.query(query).one()['id']
except Exception:
raise CustAttrException((
'Object type with name "{}" doesn\'t exist'
).format(object_type_name))
self.object_type_ids[object_type_name] = object_type_id
else:
object_type_id = self.object_type_ids[object_type_name]
object_type_name_low = object_type_name.lower()
object_type = self.object_types_per_name.get(object_type_name_low)
if not object_type:
raise CustAttrException((
'Object type with name "{}" doesn\'t exist'
).format(object_type_name))
return {
'entity_type': attr['entity_type'],
'object_type_id': object_type_id
'object_type_id': object_type["id"]
}
@@ -573,42 +646,3 @@ def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
CustomAttributes(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

View file

@@ -1,369 +1,240 @@
import os
import sys
import logging
import argparse
import re
import ftrack_api
from pype.ftrack import BaseAction
from avalon import lib as avalonlib
from pype.ftrack.lib.io_nonsingleton import DbConnector
from pypeapp import config, Anatomy
class CreateFolders(BaseAction):
'''Custom action.'''
#: Action identifier.
identifier = 'create.folders'
#: Action label.
label = 'Create Folders'
#: Action Icon.
icon = '{}/ftrack/action_icons/CreateFolders.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
identifier = "create.folders"
label = "Create Folders"
icon = "{}/ftrack/action_icons/CreateFolders.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
db = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
if len(entities) != 1:
return False
not_allowed = ['assetversion', 'project']
not_allowed = ["assetversion", "project"]
if entities[0].entity_type.lower() in not_allowed:
return False
return True
def interface(self, session, entities, event):
if event['data'].get('values', {}):
if event["data"].get("values", {}):
return
entity = entities[0]
without_interface = True
for child in entity['children']:
if child['object_type']['name'].lower() != 'task':
for child in entity["children"]:
if child["object_type"]["name"].lower() != "task":
without_interface = False
break
self.without_interface = without_interface
if without_interface:
return
title = 'Create folders'
title = "Create folders"
entity_name = entity['name']
entity_name = entity["name"]
msg = (
'<h2>Do you want to create folders also'
' for all children of "{}"?</h2>'
"<h2>Do you want to create folders also"
" for all children of \"{}\"?</h2>"
)
if entity.entity_type.lower() == 'project':
entity_name = entity['full_name']
msg = msg.replace(' also', '')
msg += '<h3>(Project root won\'t be created if not checked)</h3>'
if entity.entity_type.lower() == "project":
entity_name = entity["full_name"]
msg = msg.replace(" also", "")
msg += "<h3>(Project root won't be created if not checked)</h3>"
items = []
item_msg = {
'type': 'label',
'value': msg.format(entity_name)
"type": "label",
"value": msg.format(entity_name)
}
item_label = {
'type': 'label',
'value': 'With all children entities'
"type": "label",
"value": "With all children entities"
}
item = {
'name': 'children_included',
'type': 'boolean',
'value': False
"name": "children_included",
"type": "boolean",
"value": False
}
items.append(item_msg)
items.append(item_label)
items.append(item)
if len(items) == 0:
return {
'success': False,
'message': 'Didn\'t find any running jobs'
}
else:
return {
'items': items,
'title': title
}
return {
"items": items,
"title": title
}
def launch(self, session, entities, event):
'''Callback method for custom action.'''
with_childrens = True
if self.without_interface is False:
if 'values' not in event['data']:
if "values" not in event["data"]:
return
with_childrens = event['data']['values']['children_included']
with_childrens = event["data"]["values"]["children_included"]
entity = entities[0]
if entity.entity_type.lower() == 'project':
if entity.entity_type.lower() == "project":
proj = entity
else:
proj = entity['project']
project_name = proj['full_name']
project_code = proj['name']
if entity.entity_type.lower() == 'project' and with_childrens == False:
proj = entity["project"]
project_name = proj["full_name"]
project_code = proj["name"]
if entity.entity_type.lower() == 'project' and with_childrens is False:
return {
'success': True,
'message': 'Nothing was created'
}
data = {
"root": os.environ["AVALON_PROJECTS"],
"project": {
"name": project_name,
"code": project_code
}
}
all_entities = []
all_entities.append(entity)
if with_childrens:
all_entities = self.get_notask_children(entity)
av_project = None
try:
self.db.install()
self.db.Session['AVALON_PROJECT'] = project_name
av_project = self.db.find_one({'type': 'project'})
template_work = av_project['config']['template']['work']
template_publish = av_project['config']['template']['publish']
self.db.uninstall()
except Exception:
templates = Anatomy().templates
template_work = templates["avalon"]["work"]
template_publish = templates["avalon"]["publish"]
anatomy = Anatomy(project_name)
work_keys = ["work", "folder"]
work_template = anatomy.templates
for key in work_keys:
work_template = work_template[key]
work_has_apps = "{app" in work_template
publish_keys = ["publish", "folder"]
publish_template = anatomy.templates
for key in publish_keys:
publish_template = publish_template[key]
publish_has_apps = "{app" in publish_template
presets = config.get_presets()
app_presets = presets.get("tools", {}).get("sw_folders")
cached_apps = {}
collected_paths = []
presets = config.get_presets()['tools']['sw_folders']
for entity in all_entities:
if entity.entity_type.lower() == 'project':
if entity.entity_type.lower() == "project":
continue
ent_data = data.copy()
ent_data = {
"project": {
"name": project_name,
"code": project_code
}
}
asset_name = entity['name']
ent_data['asset'] = asset_name
ent_data["asset"] = entity["name"]
parents = entity['link']
hierarchy_names = [p['name'] for p in parents[1:-1]]
hierarchy = ''
parents = entity["link"][1:-1]
hierarchy_names = [p["name"] for p in parents]
hierarchy = ""
if hierarchy_names:
hierarchy = os.path.sep.join(hierarchy_names)
ent_data['hierarchy'] = hierarchy
ent_data["hierarchy"] = hierarchy
tasks_created = False
if entity['children']:
for child in entity['children']:
if child['object_type']['name'].lower() != 'task':
continue
tasks_created = True
task_type_name = child['type']['name'].lower()
task_data = ent_data.copy()
task_data['task'] = child['name']
possible_apps = presets.get(task_type_name, [])
template_work_created = False
template_publish_created = False
apps = []
for child in entity["children"]:
if child["object_type"]["name"].lower() != "task":
continue
tasks_created = True
task_type_name = child["type"]["name"].lower()
task_data = ent_data.copy()
task_data["task"] = child["name"]
apps = []
if app_presets and (work_has_apps or publish_has_apps):
possible_apps = app_presets.get(task_type_name, [])
for app in possible_apps:
try:
app_data = avalonlib.get_application(app)
app_dir = app_data['application_dir']
except ValueError:
app_dir = app
if app in cached_apps:
app_dir = cached_apps[app]
else:
try:
app_data = avalonlib.get_application(app)
app_dir = app_data["application_dir"]
except ValueError:
app_dir = app
cached_apps[app] = app_dir
apps.append(app_dir)
# Template work
if '{app}' in template_work:
for app in apps:
template_work_created = True
app_data = task_data.copy()
app_data['app'] = app
collected_paths.append(
self.compute_template(
template_work, app_data
)
)
if template_work_created is False:
collected_paths.append(
self.compute_template(template_work, task_data)
)
# Template publish
if '{app}' in template_publish:
for app in apps:
template_publish_created = True
app_data = task_data.copy()
app_data['app'] = app
collected_paths.append(
self.compute_template(
template_publish, app_data, True
)
)
if template_publish_created is False:
collected_paths.append(
self.compute_template(
template_publish, task_data, True
)
)
# Template work
if work_has_apps:
app_data = task_data.copy()
for app in apps:
app_data["app"] = app
collected_paths.append(self.compute_template(
anatomy, app_data, work_keys
))
else:
collected_paths.append(self.compute_template(
anatomy, task_data, work_keys
))
# Template publish
if publish_has_apps:
app_data = task_data.copy()
for app in apps:
app_data["app"] = app
collected_paths.append(self.compute_template(
anatomy, app_data, publish_keys
))
else:
collected_paths.append(self.compute_template(
anatomy, task_data, publish_keys
))
if not tasks_created:
# create path for entity
collected_paths.append(
self.compute_template(template_work, ent_data)
)
collected_paths.append(
self.compute_template(template_publish, ent_data)
)
if len(collected_paths) > 0:
self.log.info('Creating folders:')
collected_paths.append(self.compute_template(
anatomy, ent_data, work_keys
))
collected_paths.append(self.compute_template(
anatomy, ent_data, publish_keys
))
if len(collected_paths) == 0:
return {
"success": True,
"message": "No project folders to create."
}
self.log.info("Creating folders:")
for path in set(collected_paths):
self.log.info(path)
if not os.path.exists(path):
os.makedirs(path)
return {
'success': True,
'message': 'Created Folders Successfully!'
"success": True,
"message": "Successfully created project folders."
}
def get_notask_children(self, entity):
output = []
if entity.get('object_type', {}).get(
'name', entity.entity_type
).lower() == 'task':
if entity.entity_type.lower() == "task":
return output
else:
output.append(entity)
if entity['children']:
for child in entity['children']:
output.extend(self.get_notask_children(child))
output.append(entity)
for child in entity["children"]:
output.extend(self.get_notask_children(child))
return output
def template_format(self, template, data):
def compute_template(self, anatomy, data, anatomy_keys):
filled_template = anatomy.format_all(data)
for key in anatomy_keys:
filled_template = filled_template[key]
partial_data = PartialDict(data)
if filled_template.solved:
return os.path.normpath(filled_template)
# remove subdict items from string (like 'project[name]')
subdict = PartialDict()
count = 1
store_pattern = 5*'_'+'{:0>3}'
regex_pattern = "\{\w*\[[^\}]*\]\}"
matches = re.findall(regex_pattern, template)
for match in matches:
key = store_pattern.format(count)
subdict[key] = match
template = template.replace(match, '{'+key+'}')
count += 1
# solve filling keys with optional keys
solved = self._solve_with_optional(template, partial_data)
# try to solve subdict and replace them back to string
for k, v in subdict.items():
try:
v = v.format_map(data)
except (KeyError, TypeError):
pass
subdict[k] = v
return solved.format_map(subdict)
def _solve_with_optional(self, template, data):
# Remove optional missing keys
pattern = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
invalid_optionals = []
for group in pattern.findall(template):
try:
group.format(**data)
except KeyError:
invalid_optionals.append(group)
for group in invalid_optionals:
template = template.replace(group, "")
solved = template.format_map(data)
# solving after format optional in second round
for catch in re.compile(r"(<.*?[^{0]*>)[^0-9]*?").findall(solved):
if "{" in catch:
# remove all optional
solved = solved.replace(catch, "")
else:
# Remove optional symbols
solved = solved.replace(catch, catch[1:-1])
return solved
def compute_template(self, str, data, task=False):
first_result = self.template_format(str, data)
if first_result == first_result.split('{')[0]:
return os.path.normpath(first_result)
if task:
return os.path.normpath(first_result.split('{')[0])
index = first_result.index('{')
regex = '\{\w*[^\}]*\}'
match = re.findall(regex, first_result[index:])[0]
without_missing = str.split(match)[0].split('}')
output_items = []
for part in without_missing:
if '{' in part:
output_items.append(part + '}')
return os.path.normpath(
self.template_format(''.join(output_items), data)
self.log.warning(
"Template \"{}\" was not fully filled \"{}\"".format(
filled_template.template, filled_template
)
)
class PartialDict(dict):
def __getitem__(self, item):
out = super().__getitem__(item)
if isinstance(out, dict):
return '{'+item+'}'
return out
def __missing__(self, key):
return '{'+key+'}'
return os.path.normpath(filled_template.split("{")[0])
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
"""Register plugin. Called when used as a plugin."""
CreateFolders(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

View file

@@ -1,36 +1,67 @@
import os
import sys
import re
import argparse
import logging
import ftrack_api
from pype.ftrack import BaseAction
from pypeapp import config
from pypeapp import config, Anatomy
class CreateProjectFolders(BaseAction):
'''Edit meta data action.'''
"""Action create folder structure and may create hierarchy in Ftrack.
#: Action identifier.
identifier = 'create.project.structure'
#: Action label.
label = 'Create Project Structure'
#: Action description.
description = 'Creates folder structure'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator', 'Project Manager']
icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
Creation of folder structure and hierarchy in Ftrack is based on presets.
These presets are located in:
`~/pype-config/presets/tools/project_folder_structure.json`
Example of content:
```json
{
"__project_root__": {
"prod" : {},
"resources" : {
"footage": {
"plates": {},
"offline": {}
},
"audio": {},
"art_dept": {}
},
"editorial" : {},
"assets[ftrack.Library]": {
"characters[ftrack]": {},
"locations[ftrack]": {}
},
"shots[ftrack.Sequence]": {
"scripts": {},
"editorial[ftrack.Folder]": {}
}
}
}
```
Key "__project_root__" indicates root folder (or entity). Each key in
dictionary represents folder name. Value may contain another dictionary
with subfolders.
Identifier `[ftrack]` in name says that this should be also created in
Ftrack hierarchy. It is possible to specify entity type of item with "." .
If key is `assets[ftrack.Library]` then in ftrack will be created entity
with name "assets" and entity type "Library". It is expected Library entity
type exist in Ftrack.
"""
identifier = "create.project.structure"
label = "Create Project Structure"
description = "Creates folder structure"
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = "{}/ftrack/action_icons/CreateProjectFolders.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
pattern_array = re.compile('\[.*\]')
pattern_ftrack = '.*\[[.]*ftrack[.]*'
pattern_ent_ftrack = 'ftrack\.[^.,\],\s,]*'
project_root_key = '__project_root__'
pattern_array = re.compile(r"\[.*\]")
pattern_ftrack = re.compile(r".*\[[.]*ftrack[.]*")
pattern_ent_ftrack = re.compile(r"ftrack\.[^.,\],\s,]*")
project_root_key = "__project_root__"
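A minimal sketch of how the `name[ftrack.EntityType]` convention from the docstring can be interpreted with the regex patterns defined above, and how the nested preset dictionary flattens into folder paths. The `parse_key` and `flatten_paths` helpers and the sample preset are illustrative only, not part of this commit:
```python
import re

# Same patterns the action defines above.
pattern_array = re.compile(r"\[.*\]")
pattern_ent_ftrack = re.compile(r"ftrack\.[^.,\],\s,]*")


def parse_key(key):
    """Split 'assets[ftrack.Library]' into ('assets', 'Library').

    Keys without an '[ftrack...]' marker are plain folders (type None);
    '[ftrack]' without an explicit type falls back to 'Folder'.
    """
    matches = pattern_array.findall(key)
    if not matches:
        return key, None
    name = key.replace(matches[0], "")
    ent_type = "Folder"
    ent_type_match = pattern_ent_ftrack.findall(matches[0])
    if ent_type_match:
        ent_type_split = ent_type_match[0].split(".")
        if len(ent_type_split) == 2:
            ent_type = ent_type_split[1]
    return name, ent_type


def flatten_paths(structure, parents=()):
    """Yield a folder-name tuple for every node of the preset dictionary."""
    for key, value in structure.items():
        name, _ = parse_key(key)
        current = parents + (name,)
        yield current
        if isinstance(value, dict):
            yield from flatten_paths(value, current)


presets = {
    "__project_root__": {
        "editorial": {},
        "assets[ftrack.Library]": {"characters[ftrack]": {}},
    }
}
for path in flatten_paths(presets):
    print("/".join(path))
# __project_root__
# __project_root__/editorial
# __project_root__/assets
# __project_root__/assets/characters
```
In the action itself, `__project_root__` is later replaced with the project root path, and names carrying an `[ftrack.*]` marker are created as Ftrack entities of the parsed type.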
def discover(self, session, entities, event):
''' Validation '''
if len(entities) != 1:
return False
@@ -41,22 +72,30 @@ class CreateProjectFolders(BaseAction):
def launch(self, session, entities, event):
entity = entities[0]
if entity.entity_type.lower() == 'project':
project = entity
else:
project = entity['project']
project = self.get_project_from_entity(entity)
project_folder_presets = (
config.get_presets()
.get("tools", {})
.get("project_folder_structure")
)
if not project_folder_presets:
return {
"success": False,
"message": "Project structure presets are not set."
}
presets = config.get_presets()['tools']['project_folder_structure']
try:
# Get paths based on presets
basic_paths = self.get_path_items(presets)
self.create_folders(basic_paths, entity)
basic_paths = self.get_path_items(project_folder_presets)
anatomy = Anatomy(project["full_name"])
self.create_folders(basic_paths, entity, project, anatomy)
self.create_ftrack_entities(basic_paths, project)
except Exception as e:
except Exception as exc:
session.rollback()
return {
'success': False,
'message': str(e)
"success": False,
"message": str(exc)
}
return True
@@ -113,15 +152,15 @@ class CreateProjectFolders(BaseAction):
def trigger_creation(self, separation, parent):
for item, subvalues in separation.items():
matches = re.findall(self.pattern_array, item)
ent_type = 'Folder'
ent_type = "Folder"
if len(matches) == 0:
name = item
else:
match = matches[0]
name = item.replace(match, '')
name = item.replace(match, "")
ent_type_match = re.findall(self.pattern_ent_ftrack, match)
if len(ent_type_match) > 0:
ent_type_split = ent_type_match[0].split('.')
ent_type_split = ent_type_match[0].split(".")
if len(ent_type_split) == 2:
ent_type = ent_type_split[1]
new_parent = self.create_ftrack_entity(name, ent_type, parent)
@@ -130,22 +169,22 @@ class CreateProjectFolders(BaseAction):
self.trigger_creation(subvalue, new_parent)
def create_ftrack_entity(self, name, ent_type, parent):
for children in parent['children']:
if children['name'] == name:
for children in parent["children"]:
if children["name"] == name:
return children
data = {
'name': name,
'parent_id': parent['id']
"name": name,
"parent_id": parent["id"]
}
if parent.entity_type.lower() == 'project':
data['project_id'] = parent['id']
if parent.entity_type.lower() == "project":
data["project_id"] = parent["id"]
else:
data['project_id'] = parent['project']['id']
data["project_id"] = parent["project"]["id"]
existing_entity = self.session.query((
"TypedContext where name is \"{}\" and "
"parent_id is \"{}\" and project_id is \"{}\""
).format(name, data['parent_id'], data['project_id'])).first()
).format(name, data["parent_id"], data["project_id"])).first()
if existing_entity:
return existing_entity
@@ -161,12 +200,11 @@
else:
paths = self.get_path_items(value)
for path in paths:
if isinstance(path, str):
output.append([key, path])
else:
p = [key]
p.extend(path)
output.append(p)
if not isinstance(path, (list, tuple)):
path = [path]
output.append([key, *path])
return output
def compute_paths(self, basic_paths_items, project_root):
@@ -176,72 +214,30 @@
for path_item in path_items:
matches = re.findall(self.pattern_array, path_item)
if len(matches) > 0:
path_item = path_item.replace(matches[0], '')
path_item = path_item.replace(matches[0], "")
if path_item == self.project_root_key:
path_item = project_root
clean_items.append(path_item)
output.append(os.path.normpath(os.path.sep.join(clean_items)))
return output
def create_folders(self, basic_paths, entity):
# Set project root folder
if entity.entity_type.lower() == 'project':
project_name = entity['full_name']
def create_folders(self, basic_paths, entity, project, anatomy):
roots_paths = []
if isinstance(anatomy.roots, dict):
for root in anatomy.roots:
roots_paths.append(root.value)
else:
project_name = entity['project']['full_name']
project_root_items = [os.environ['AVALON_PROJECTS'], project_name]
project_root = os.path.sep.join(project_root_items)
full_paths = self.compute_paths(basic_paths, project_root)
# Create folders
for path in full_paths:
if os.path.exists(path):
continue
os.makedirs(path.format(project_root=project_root))
roots_paths.append(anatomy.roots.value)
for root_path in roots_paths:
project_root = os.path.join(root_path, project["full_name"])
full_paths = self.compute_paths(basic_paths, project_root)
# Create folders
for path in full_paths:
if os.path.exists(path):
continue
os.makedirs(path.format(project_root=project_root))
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
CreateProjectFolders(session, plugins_presets).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

View file

@@ -7,6 +7,7 @@ from pymongo import UpdateOne
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
from pypeapp import Anatomy
import avalon.pipeline
@@ -21,8 +22,8 @@ class DeleteOldVersions(BaseAction):
" archived with only lates versions."
)
role_list = ["Pypeclub", "Project Manager", "Administrator"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
icon = "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
dbcon = DbConnector()
@@ -41,36 +42,8 @@
return False
def interface(self, session, entities, event):
# TODO Add roots existence validation
items = []
root = os.environ.get("AVALON_PROJECTS")
if not root:
msg = "Root path to projects is not set."
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.inteface_title, event=event
)
return {
"success": False,
"message": msg
}
if not os.path.exists(root):
msg = "Root path does not exists \"{}\".".format(str(root))
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.inteface_title, event=event
)
return {
"success": False,
"message": msg
}
values = event["data"].get("values")
if values:
versions_count = int(values["last_versions_count"])
@@ -194,6 +167,7 @@
# Set Mongo collection
project_name = project["full_name"]
anatomy = Anatomy(project_name)
self.dbcon.Session["AVALON_PROJECT"] = project_name
self.log.debug("Project is set to {}".format(project_name))
@@ -307,7 +281,7 @@
dir_paths = {}
file_paths_by_dir = collections.defaultdict(list)
for repre in repres:
file_path, seq_path = self.path_from_represenation(repre)
file_path, seq_path = self.path_from_represenation(repre, anatomy)
if file_path is None:
self.log.warning((
"Could not format path for represenation \"{}\""
@@ -495,21 +469,17 @@
self.log.debug("Removed folder: {}".format(dir_path))
os.rmdir(dir_path)
def path_from_represenation(self, representation):
def path_from_represenation(self, representation, anatomy):
try:
template = representation["data"]["template"]
except KeyError:
return (None, None)
root = os.environ["AVALON_PROJECTS"]
if not root:
return (None, None)
sequence_path = None
try:
context = representation["context"]
context["root"] = root
context["root"] = anatomy.roots
path = avalon.pipeline.format_template_with_optional_keys(
context, template
)
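The substantive change in this hunk is where "root" comes from: representation paths are now filled from the project's Anatomy roots instead of the single AVALON_PROJECTS environment variable. Below is a rough sketch of the optional-key template fill this relies on; the template, the data, and the `fill_template` helper are illustrative stand-ins, not avalon's actual implementation (which, given `context["root"] = anatomy.roots`, can presumably also resolve named roots such as `{root[work]}`):
```python
import re


def fill_template(template, data):
    """Format a template, dropping <...> optional sections whose keys are missing."""
    def replace_optional(match):
        inner = match.group(1)
        try:
            return inner.format(**data)
        except KeyError:
            # A key inside the optional section is missing: drop the section.
            return ""
    template = re.sub(r"<([^<>]*)>", replace_optional, template)
    return template.format(**data)


template = "{root}/{project}/publish/{subset}<_v{version}>/{file}"
data = {
    "root": "P:/projects",  # previously os.environ["AVALON_PROJECTS"]
    "project": "demo",
    "subset": "renderMain",
    "version": 7,
    "file": "renderMain.exr",
}
print(fill_template(template, data))
# P:/projects/demo/publish/renderMain_v7/renderMain.exr

# With "version" missing, the optional section is dropped entirely:
data.pop("version")
print(fill_template(template, data))
# P:/projects/demo/publish/renderMain/renderMain.exr
```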

View file

@@ -2,7 +2,6 @@ import os
import copy
import shutil
import collections
import string
import clique
from bson.objectid import ObjectId
@@ -17,24 +16,18 @@ from pype.ftrack.lib.avalon_sync import CustAttrIdKey
class Delivery(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
identifier = "delivery.action"
#: Action label.
label = "Delivery"
#: Action description.
description = "Deliver data to client"
#: roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project manager"]
icon = '{}/ftrack/action_icons/Delivery.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
icon = "{}/ftrack/action_icons/Delivery.svg".format(
os.environ.get("PYPE_STATICS_SERVER", "")
)
db_con = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
@@ -301,17 +294,10 @@
repre = repres_by_name.get(comp_name)
repres_to_deliver.append(repre)
if not location_path:
location_path = os.environ.get("AVALON_PROJECTS") or ""
print(location_path)
anatomy = Anatomy(project_name)
for repre in repres_to_deliver:
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data["root"] = location_path
anatomy_filled = anatomy.format_all(anatomy_data)
test_path = anatomy_filled["delivery"][anatomy_name]
@@ -341,7 +327,7 @@
self.report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(result)
msg, str(repre["_id"]), str(test_path)
)
)
continue
@@ -352,9 +338,9 @@
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_represenation(repre)
repre_path = self.path_from_represenation(repre, anatomy)
# TODO add backup solution where root of path from component
# is replaced with AVALON_PROJECTS root
# is replaced with root
if not frame:
self.process_single_file(
repre_path, anatomy, anatomy_name, anatomy_data
@@ -452,7 +438,7 @@
self.copy_file(src, dst)
def path_from_represenation(self, representation):
def path_from_represenation(self, representation, anatomy):
try:
template = representation["data"]["template"]
@@ -461,7 +447,7 @@
try:
context = representation["context"]
context["root"] = os.environ.get("AVALON_PROJECTS") or ""
context["root"] = anatomy.roots
path = pipeline.format_template_with_optional_keys(
context, template
)

View file

@@ -1,9 +1,8 @@
import os
import json
from ruamel import yaml
from pype.ftrack import BaseAction
from pypeapp import config
from pypeapp import config, Anatomy, project_overrides_dir_path
from pype.ftrack.lib.avalon_sync import get_avalon_attr
@@ -24,6 +23,7 @@ class PrepareProject(BaseAction):
# Key to store info about triggering folder structure creation
create_project_structure_key = "create_folder_structure"
item_splitter = {'type': 'label', 'value': '---'}
def discover(self, session, entities, event):
''' Validation '''
@@ -41,15 +41,190 @@
# Inform user that this may take a while
self.show_message(event, "Preparing data... Please wait", True)
self.log.debug("Preparing data which will be shown")
self.log.debug("Loading custom attributes")
cust_attrs, hier_cust_attrs = get_avalon_attr(session, True)
project_defaults = config.get_presets(
entities[0]["full_name"]
).get("ftrack", {}).get("project_defaults", {})
self.log.debug("Preparing data which will be shown")
project_name = entities[0]["full_name"]
project_defaults = (
config.get_presets(project_name)
.get("ftrack", {})
.get("project_defaults", {})
)
anatomy = Anatomy(project_name)
if not anatomy.roots:
return {
"success": False,
"message": (
"Have issues with loading Roots for project \"{}\"."
).format(anatomy.project_name)
}
root_items = self.prepare_root_items(anatomy)
ca_items, multiselect_enumerators = (
self.prepare_custom_attribute_items(project_defaults)
)
self.log.debug("Heavy items are ready. Preparing last items group.")
title = "Prepare Project"
items = []
# Add root items
items.extend(root_items)
items.append(self.item_splitter)
# Ask if want to trigger Action Create Folder Structure
items.append({
"type": "label",
"value": "<h3>Want to create basic Folder Structure?</h3>"
})
items.append({
"name": self.create_project_structure_key,
"type": "boolean",
"value": False,
"label": "Check if Yes"
})
items.append(self.item_splitter)
items.append({
"type": "label",
"value": "<h3>Set basic Attributes:</h3>"
})
items.extend(ca_items)
# This item will be last (before enumerators)
# - sets value of auto synchronization
auto_sync_name = "avalon_auto_sync"
auto_sync_item = {
"name": auto_sync_name,
"type": "boolean",
"value": project_defaults.get(auto_sync_name, False),
"label": "AutoSync to Avalon"
}
# Add autosync attribute
items.append(auto_sync_item)
# Add enumerator items at the end
for item in multiselect_enumerators:
items.append(item)
return {
"items": items,
"title": title
}
def prepare_root_items(self, anatomy):
root_items = []
self.log.debug("Root items preparation begins.")
root_names = anatomy.root_names()
roots = anatomy.roots
root_items.append({
"type": "label",
"value": "<h3>Check your Project root settings</h3>"
})
root_items.append({
"type": "label",
"value": (
"<p><i>NOTE: Roots are <b>crutial</b> for path filling"
" (and creating folder structure).</i></p>"
)
})
root_items.append({
"type": "label",
"value": (
"<p><i>WARNING: Do not change roots on running project,"
" that <b>will cause workflow issues</b>.</i></p>"
)
})
default_roots = anatomy.roots
while isinstance(default_roots, dict):
key = tuple(default_roots.keys())[0]
default_roots = default_roots[key]
empty_text = "Enter root path here..."
# Root names is None when anatomy templates contain "{root}"
all_platforms = ["windows", "linux", "darwin"]
if root_names is None:
root_items.append(self.item_splitter)
# find first possible key
for platform in all_platforms:
value = default_roots.raw_data.get(platform) or ""
root_items.append({
"label": platform,
"name": "__root__{}".format(platform),
"type": "text",
"value": value,
"empty_text": empty_text
})
return root_items
root_name_data = {}
missing_roots = []
for root_name in root_names:
root_name_data[root_name] = {}
if not isinstance(roots, dict):
missing_roots.append(root_name)
continue
root_item = roots.get(root_name)
if not root_item:
missing_roots.append(root_name)
continue
for platform in all_platforms:
root_name_data[root_name][platform] = (
root_item.raw_data.get(platform) or ""
)
if missing_roots:
default_values = {}
for platform in all_platforms:
default_values[platform] = (
default_roots.raw_data.get(platform) or ""
)
for root_name in missing_roots:
root_name_data[root_name] = default_values
root_names = list(root_name_data.keys())
root_items.append({
"type": "hidden",
"name": "__rootnames__",
"value": json.dumps(root_names)
})
for root_name, values in root_name_data.items():
root_items.append(self.item_splitter)
root_items.append({
"type": "label",
"value": "Root: \"{}\"".format(root_name)
})
for platform, value in values.items():
root_items.append({
"label": platform,
"name": "__root__{}{}".format(root_name, platform),
"type": "text",
"value": value,
"empty_text": empty_text
})
self.log.debug("Root items preparation ended.")
return root_items
def _attributes_to_set(self, project_defaults):
attributes_to_set = {}
cust_attrs, hier_cust_attrs = get_avalon_attr(self.session, True)
for attr in hier_cust_attrs:
key = attr["key"]
if key.startswith("avalon_"):
@@ -77,45 +252,17 @@
attributes_to_set.items(),
key=lambda x: x[1]["label"]
))
return attributes_to_set
def prepare_custom_attribute_items(self, project_defaults):
items = []
multiselect_enumerators = []
attributes_to_set = self._attributes_to_set(project_defaults)
self.log.debug("Preparing interface for keys: \"{}\"".format(
str([key for key in attributes_to_set])
))
item_splitter = {'type': 'label', 'value': '---'}
title = "Prepare Project"
items = []
# Ask if want to trigger Action Create Folder Structure
items.append({
"type": "label",
"value": "<h3>Want to create basic Folder Structure?</h3>"
})
items.append({
"name": self.create_project_structure_key,
"type": "boolean",
"value": False,
"label": "Check if Yes"
})
items.append(item_splitter)
items.append({
"type": "label",
"value": "<h3>Set basic Attributes:</h3>"
})
multiselect_enumerators = []
# This item will be last (before enumerators)
# - sets value of auto synchronization
auto_sync_name = "avalon_auto_sync"
auto_sync_item = {
"name": auto_sync_name,
"type": "boolean",
"value": project_defaults.get(auto_sync_name, False),
"label": "AutoSync to Avalon"
}
for key, in_data in attributes_to_set.items():
attr = in_data["object"]
@@ -139,8 +286,7 @@
attr_config_data = json.loads(attr_config["data"])
if attr_config["multiSelect"] is True:
multiselect_enumerators.append(item_splitter)
multiselect_enumerators.append(self.item_splitter)
multiselect_enumerators.append({
"type": "label",
"value": in_data["label"]
@@ -160,10 +306,7 @@
"label": "- {}".format(option["menu"])
}
if default:
if (
isinstance(default, list) or
isinstance(default, tuple)
):
if isinstance(default, (list, tuple)):
if name in default:
item["value"] = True
else:
@@ -204,17 +347,7 @@
items.append(item)
# Add autosync attribute
items.append(auto_sync_item)
# Add enumerator items at the end
for item in multiselect_enumerators:
items.append(item)
return {
'items': items,
'title': title
}
return items, multiselect_enumerators
def launch(self, session, entities, event):
if not event['data'].get('values', {}):
@@ -222,6 +355,35 @@
in_data = event['data']['values']
root_values = {}
root_key = "__root__"
for key in tuple(in_data.keys()):
if key.startswith(root_key):
_key = key[len(root_key):]
root_values[_key] = in_data.pop(key)
root_names = in_data.pop("__rootnames__", None)
root_data = {}
if root_names:
for root_name in json.loads(root_names):
root_data[root_name] = {}
for key, value in tuple(root_values.items()):
if key.startswith(root_name):
_key = key[len(root_name):]
root_data[root_name][_key] = value
else:
for key, value in root_values.items():
root_data[key] = value
project_name = entities[0]["full_name"]
anatomy = Anatomy(project_name)
anatomy.templates_obj.save_project_overrides(project_name)
anatomy.roots_obj.save_project_overrides(
project_name, root_data, override=True
)
anatomy.reset()
# pop out info about creating project structure
create_proj_struct = in_data.pop(self.create_project_structure_key)
@@ -269,94 +431,22 @@
def create_project_specific_config(self, project_name, json_data):
self.log.debug("*** Creating project specifig configs ***")
path_proj_configs = os.environ.get('PYPE_PROJECT_CONFIGS', "")
# Skip if PYPE_PROJECT_CONFIGS is not set
# TODO show user OS message
if not path_proj_configs:
self.log.warning((
"Environment variable \"PYPE_PROJECT_CONFIGS\" is not set."
" Project specific config can't be set."
))
return
path_proj_configs = os.path.normpath(path_proj_configs)
# Skip if path does not exist
# TODO create if not exist?!!!
if not os.path.exists(path_proj_configs):
self.log.warning((
"Path set in Environment variable \"PYPE_PROJECT_CONFIGS\""
" Does not exist."
))
return
project_specific_path = os.path.normpath(
os.path.join(path_proj_configs, project_name)
)
project_specific_path = project_overrides_dir_path(project_name)
if not os.path.exists(project_specific_path):
os.makedirs(project_specific_path)
self.log.debug((
"Project specific config folder for project \"{}\" created."
).format(project_name))
# Anatomy ####################################
self.log.debug("--- Processing Anatomy Begins: ---")
anatomy_dir = os.path.normpath(os.path.join(
project_specific_path, "anatomy"
))
anatomy_path = os.path.normpath(os.path.join(
anatomy_dir, "default.yaml"
))
anatomy = None
if os.path.exists(anatomy_path):
self.log.debug(
"Anatomy file already exist. Trying to read: \"{}\"".format(
anatomy_path
)
)
# Try to load data
with open(anatomy_path, 'r') as file_stream:
try:
anatomy = yaml.load(file_stream, Loader=yaml.loader.Loader)
self.log.debug("Reading Anatomy file was successful")
except yaml.YAMLError as exc:
self.log.warning(
"Reading Yaml file failed: \"{}\"".format(anatomy_path),
exc_info=True
)
if not anatomy:
self.log.debug("Anatomy is not set. Duplicating default.")
# Create Anatomy folder
if not os.path.exists(anatomy_dir):
self.log.debug(
"Creating Anatomy folder: \"{}\"".format(anatomy_dir)
)
os.makedirs(anatomy_dir)
source_items = [
os.environ["PYPE_CONFIG"], "anatomy", "default.yaml"
]
source_path = os.path.normpath(os.path.join(*source_items))
with open(source_path, 'r') as file_stream:
source_data = file_stream.read()
with open(anatomy_path, 'w') as file_stream:
file_stream.write(source_data)
# Presets ####################################
self.log.debug("--- Processing Presets Begins: ---")
project_defaults_dir = os.path.normpath(os.path.join(*[
project_defaults_dir = os.path.normpath(os.path.join(
project_specific_path, "presets", "ftrack"
]))
project_defaults_path = os.path.normpath(os.path.join(*[
))
project_defaults_path = os.path.normpath(os.path.join(
project_defaults_dir, "project_defaults.json"
]))
))
# Create folder if not exist
if not os.path.exists(project_defaults_dir):
self.log.debug("Creating Ftrack Presets folder: \"{}\"".format(
@@ -372,5 +462,4 @@ class PrepareProject(BaseAction):
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
PrepareProject(session, plugins_presets).register()

View file

@@ -52,41 +52,6 @@ class StoreThumbnailsToAvalon(BaseAction):
})
session.commit()
thumbnail_roots = os.environ.get(self.thumbnail_key)
if not thumbnail_roots:
msg = "`{}` environment is not set".format(self.thumbnail_key)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
msg = (
"Can't access paths, set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
project = self.get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
@@ -120,6 +85,44 @@
"message": msg
}
thumbnail_roots = os.environ.get(self.thumbnail_key)
if (
"{thumbnail_root}" in anatomy.templates["publish"]["thumbnail"]
and not thumbnail_roots
):
msg = "`{}` environment is not set".format(self.thumbnail_key)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
msg = (
"Can't access paths, set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
example_template_data = {
"_id": "ID",
"thumbnail_root": "THUBMNAIL_ROOT",

View file

@ -1244,15 +1244,17 @@ class SyncToAvalonEvent(BaseEvent):
self.process_session, entity, hier_keys, defaults
)
for key, val in hier_values.items():
if key == CustAttrIdKey:
continue
output[key] = val
return output
def process_renamed(self):
    ent_infos = self.ftrack_renamed
    if not ent_infos:
        return
renamed_tasks = {}
not_found = {}
changeable_queue = queue.Queue()
@ -1274,9 +1276,9 @@ class SyncToAvalonEvent(BaseEvent):
if not avalon_ent:
# TODO logging
self.log.debug((
"Can't change the name (Entity is not is avalon) <{}>"
"Entity is not is avalon. Moving to \"add\" process. <{}>"
).format(ent_path))
self.ftrack_added[ftrack_id] = ent_info
continue
if new_name == avalon_ent["name"]:
@ -1454,7 +1456,6 @@ class SyncToAvalonEvent(BaseEvent):
# - happen when was created by any sync event/action
pop_out_ents = []
new_tasks_by_parent = collections.defaultdict(list)
for ftrack_id, ent_info in ent_infos.items():
if self.avalon_ents_by_ftrack_id.get(ftrack_id):
pop_out_ents.append(ftrack_id)
@ -1558,36 +1559,20 @@ class SyncToAvalonEvent(BaseEvent):
pop_out_ents.append(ftrack_id)
continue
mongo_id_configuration_id = self._mongo_id_configuration(
    ent_info,
    cust_attrs,
    hier_attrs,
    entity_type_conf_ids
)
if not mongo_id_configuration_id:
    self.log.warning((
        "BUG REPORT: Missing MongoID configuration for `{} < {} >`"
    ).format(entity_type, ent_info["entityType"]))
    continue
_entity_key = collections.OrderedDict({
    "configuration_id": mongo_id_configuration_id,
"entity_id": ftrack_id
})
@ -1690,6 +1675,53 @@ class SyncToAvalonEvent(BaseEvent):
if new_name not in self.task_changes_by_avalon_id[mongo_id]:
self.task_changes_by_avalon_id[mongo_id].append(new_name)
def _mongo_id_configuration(
self,
ent_info,
cust_attrs,
hier_attrs,
temp_dict
):
# Use hierarchical mongo id attribute if possible.
if "_hierarchical" not in temp_dict:
hier_mongo_id_configuration_id = None
for attr in hier_attrs:
if attr["key"] == CustAttrIdKey:
hier_mongo_id_configuration_id = attr["id"]
break
temp_dict["_hierarchical"] = hier_mongo_id_configuration_id
hier_mongo_id_configuration_id = temp_dict.get("_hierarchical")
if hier_mongo_id_configuration_id is not None:
return hier_mongo_id_configuration_id
# Legacy part for cases that MongoID attribute is per entity type.
entity_type = ent_info["entity_type"]
mongo_id_configuration_id = temp_dict.get(entity_type)
if mongo_id_configuration_id is not None:
return mongo_id_configuration_id
for attr in cust_attrs:
key = attr["key"]
if key != CustAttrIdKey:
continue
if attr["entity_type"] != ent_info["entityType"]:
continue
if (
ent_info["entityType"] == "task" and
attr["object_type_id"] != ent_info["objectTypeId"]
):
continue
mongo_id_configuration_id = attr["id"]
break
temp_dict[entity_type] = mongo_id_configuration_id
return mongo_id_configuration_id
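The method above resolves the MongoID attribute configuration once and caches it in temp_dict, preferring the hierarchical attribute and falling back to per-entity-type attributes. A minimal, self-contained sketch of that caching pattern (names here are illustrative, not from the codebase):

# Hypothetical illustration of the temp_dict caching used above.
def cached_lookup(key, temp_dict, compute):
    # Scan/compute only on the first call per key; later calls hit the cache.
    if key not in temp_dict:
        temp_dict[key] = compute(key)
    return temp_dict[key]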
def process_moved(self):
if not self.ftrack_moved:
return
@ -1869,11 +1901,8 @@ class SyncToAvalonEvent(BaseEvent):
obj_type_id = ent_info["objectTypeId"]
ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id)
# Ftrack's entity_type does not have defined custom attributes
if ent_cust_attrs is None:
continue
for key, values in ent_info["changes"].items():

View file

@ -2,57 +2,42 @@ from pype.ftrack import BaseEvent
class ThumbnailEvents(BaseEvent):
def launch(self, session, event):
"""Updates thumbnails of entities from new AssetVersion."""
for entity in event["data"].get("entities", []):
    if (
        entity["action"] == "remove"
        or entity["entityType"].lower() != "assetversion"
        or "thumbid" not in (entity.get("keys") or [])
    ):
        continue
    version = session.get("AssetVersion", entity["entityId"])
    if not version:
        continue
    thumbnail = version.get("thumbnail")
    if not thumbnail:
        continue
    parent = version["asset"]["parent"]
    task = version["task"]
    parent["thumbnail_id"] = version["thumbnail_id"]
    if parent.entity_type.lower() == "project":
        name = parent["full_name"]
    else:
        name = parent["name"]
    task_msg = ""
    if task:
        task["thumbnail_id"] = version["thumbnail_id"]
        task_msg = " and task [ {} ]".format(task["name"])
    self.log.info(">>> Updating thumbnail for shot [ {} ]{}".format(
        name, task_msg
    ))
try:
session.commit()
@ -61,5 +46,4 @@ class ThumbnailEvents(BaseEvent):
def register(session, plugins_presets):
'''Register plugin. Called when used as a plugin.'''
ThumbnailEvents(session, plugins_presets).register()

View file

@ -158,20 +158,10 @@ class UserAssigmentEvent(BaseEvent):
"""
project_name = task['project']['full_name']
project_code = task['project']['name']
# fill in template data
asset = self._get_asset(task)
t_data = {
'project': {
'name': project_name,
'code': project_code
@ -204,11 +194,12 @@ class UserAssigmentEvent(BaseEvent):
data = self._get_template_data(task)
# format directories to pass to shell script
anatomy = Anatomy(data["project"]["name"])
anatomy_filled = anatomy.format(data)
# formatting work dir is easiest part as we can use whole path
work_dir = anatomy_filled["work"]["folder"]
# we also need publish but not whole
anatomy_filled.strict = False
publish = anatomy_filled["publish"]["folder"]
# now find path to {asset}
m = re.search("(^.+?{})".format(data['asset']),

View file

@ -2,6 +2,7 @@ import os
import re
import queue
import collections
import copy
from pype.ftrack.lib.io_nonsingleton import DbConnector
@ -291,6 +292,8 @@ class SyncEntitiesFactory:
self.filtered_ids = []
self.not_selected_ids = []
self.hier_cust_attr_ids_by_key = {}
self._ent_paths_by_ftrack_id = {}
self.ftrack_avalon_mapper = None
@ -690,7 +693,6 @@ class SyncEntitiesFactory:
ent_type["name"]: ent_type["id"] for ent_type in ent_types
}
# store default values per entity type
attrs_per_entity_type = collections.defaultdict(dict)
avalon_attrs = collections.defaultdict(dict)
@ -698,9 +700,10 @@ class SyncEntitiesFactory:
attrs_per_entity_type_ca_id = collections.defaultdict(dict)
avalon_attrs_ca_id = collections.defaultdict(dict)
attribute_key_by_id = {}
for cust_attr in custom_attrs:
key = cust_attr["key"]
attrs.add(cust_attr["id"])
attribute_key_by_id[cust_attr["id"]] = key
ca_ent_type = cust_attr["entity_type"]
if key.startswith("avalon_"):
if ca_ent_type == "show":
@ -754,19 +757,19 @@ class SyncEntitiesFactory:
prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key)
if prepared_attrs:
self.entities_dict[entity_id]["custom_attributes"] = (
copy.deepcopy(prepared_attrs)
)
if prepared_attrs_ca_id:
self.entities_dict[entity_id]["custom_attributes_id"] = (
copy.deepcopy(prepared_attrs_ca_id)
)
if prepared_avalon_attr:
self.entities_dict[entity_id]["avalon_attrs"] = (
copy.deepcopy(prepared_avalon_attr)
)
if prepared_avalon_attr_ca_id:
self.entities_dict[entity_id]["avalon_attrs_id"] = (
copy.deepcopy(prepared_avalon_attr_ca_id)
)
# TODO query custom attributes by entity_id
@ -774,7 +777,7 @@ class SyncEntitiesFactory:
"\"{}\"".format(id) for id in sync_ids
])
attributes_joined = ", ".join([
"\"{}\"".format(name) for name in attrs
"\"{}\"".format(attr_id) for attr_id in attribute_key_by_id.keys()
])
cust_attr_query = (
@ -792,13 +795,13 @@ class SyncEntitiesFactory:
else:
[values] = self.session._call(call_expr)
for value in values["data"]:
entity_id = value["entity_id"]
key = value["configuration"]["key"]
for item in values["data"]:
entity_id = item["entity_id"]
key = attribute_key_by_id[item["configuration_id"]]
store_key = "custom_attributes"
if key.startswith("avalon_"):
store_key = "avalon_attrs"
self.entities_dict[entity_id][store_key][key] = value["value"]
self.entities_dict[entity_id][store_key][key] = item["value"]
# process hierarchical attributes
self.set_hierarchical_attribute(hier_attrs, sync_ids)
@ -812,6 +815,7 @@ class SyncEntitiesFactory:
key = attr["key"]
attribute_key_by_id[attr["id"]] = key
attributes_by_key[key] = attr
self.hier_cust_attr_ids_by_key[key] = attr["id"]
store_key = "hier_attrs"
if key.startswith("avalon_"):
@ -821,6 +825,21 @@ class SyncEntitiesFactory:
attr["default"]
)
# Add attribute ids to entities dictionary
avalon_attribute_id_by_key = {
attr_key: attr_id
for attr_id, attr_key in attribute_key_by_id.items()
if attr_key.startswith("avalon_")
}
for entity_id in self.entities_dict.keys():
if "avalon_attrs_id" not in self.entities_dict[entity_id]:
self.entities_dict[entity_id]["avalon_attrs_id"] = {}
for attr_key, attr_id in avalon_attribute_id_by_key.items():
self.entities_dict[entity_id]["avalon_attrs_id"][attr_key] = (
attr_id
)
# Prepare dict with all hier keys and None values
prepare_dict = {}
prepare_dict_avalon = {}
@ -834,7 +853,7 @@ class SyncEntitiesFactory:
# Skip project because has stored defaults at the moment
if entity_dict["entity_type"] == "project":
continue
entity_dict["hier_attrs"] = prepare_dict.copy()
entity_dict["hier_attrs"] = copy.deepcopy(prepare_dict)
for key, val in prepare_dict_avalon.items():
entity_dict["avalon_attrs"][key] = val
@ -842,32 +861,40 @@ class SyncEntitiesFactory:
entity_ids_joined = ", ".join([
"\"{}\"".format(id) for id in sync_ids
])
attributes_joined = ", ".join([
"\"{}\"".format(attr_id) for attr_id in attribute_key_by_id.keys()
])
avalon_hier = []
call_expr = [{
    "action": "query",
    "expression": (
        "select value, entity_id from ContextCustomAttributeValue "
        "where entity_id in ({}) and configuration_id in ({})"
    ).format(entity_ids_joined, attributes_joined)
}]
if hasattr(self.session, "call"):
    [values] = self.session.call(call_expr)
else:
    [values] = self.session._call(call_expr)
for value in values["data"]:
if value["value"] is None:
continue
entity_id = value["entity_id"]
key = attribute_key_by_id[value["configuration_id"]]
if key.startswith("avalon_"):
store_key = "avalon_attrs"
avalon_hier.append(key)
else:
store_key = "hier_attrs"
self.entities_dict[entity_id][store_key][key] = value["value"]
for item in values["data"]:
value = item["value"]
# WARNING It is not possible to propage enumerate hierachical
# attributes with multiselection 100% right. Unseting all values
# will cause inheritance from parent.
if (
value is None
or (isinstance(value, (tuple, list)) and not value)
):
continue
entity_id = item["entity_id"]
key = attribute_key_by_id[item["configuration_id"]]
if key.startswith("avalon_"):
store_key = "avalon_attrs"
avalon_hier.append(key)
else:
store_key = "hier_attrs"
self.entities_dict[entity_id][store_key][key] = value
# Get dictionary with not None hierarchical values to pull to childs
top_id = self.ft_project_id
@ -877,6 +904,8 @@ class SyncEntitiesFactory:
project_values[key] = value
for key in avalon_hier:
if key == CustAttrIdKey:
continue
value = self.entities_dict[top_id]["avalon_attrs"][key]
if value is not None:
project_values[key] = value
@ -887,7 +916,7 @@ class SyncEntitiesFactory:
while not hier_down_queue.empty():
hier_values, parent_id = hier_down_queue.get()
for child_id in self.entities_dict[parent_id]["children"]:
_hier_values = copy.deepcopy(hier_values)
for key in attributes_by_key.keys():
if key.startswith("avalon_"):
store_key = "avalon_attrs"
@ -1593,9 +1622,16 @@ class SyncEntitiesFactory:
if current_id != new_id_str:
# store mongo id to ftrack entity
configuration_id = self.hier_cust_attr_ids_by_key.get(
    CustAttrIdKey
)
if not configuration_id:
    # NOTE this is for cases when CustAttrIdKey key is not
    # hierarchical custom attribute but per entity type
    configuration_id = self.entities_dict[ftrack_id][
        "avalon_attrs_id"
    ][CustAttrIdKey]
_entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"entity_id": ftrack_id
@ -1712,14 +1748,10 @@ class SyncEntitiesFactory:
except InvalidId:
new_id = ObjectId()
project_name = self.entities_dict[self.ft_project_id]["name"]
project_item["_id"] = new_id
project_item["parent"] = None
project_item["schema"] = EntitySchemas["project"]
project_item["config"]["schema"] = EntitySchemas["config"]
project_item["config"]["template"] = (
get_avalon_project_template(project_name)
)
self.ftrack_avalon_mapper[self.ft_project_id] = new_id
self.avalon_ftrack_mapper[new_id] = self.ft_project_id
@ -1866,7 +1898,7 @@ class SyncEntitiesFactory:
parents_queue.put((self.ft_project_id, [], False))
while not parents_queue.empty():
ftrack_id, parent_parents, changed = parents_queue.get()
_parents = copy.deepcopy(parent_parents)
if ftrack_id not in hierarchy_changing_ids and not changed:
if ftrack_id != self.ft_project_id:
_parents.append(self.entities_dict[ftrack_id]["name"])

View file

@ -1,5 +1,6 @@
import os
import sys
import copy
import platform
import avalon.lib
import acre
@ -82,8 +83,8 @@ class AppAction(BaseAction):
"""
if (
    len(entities) != 1
    or entities[0].entity_type.lower() != 'task'
):
return False
@ -141,115 +142,88 @@ class AppAction(BaseAction):
"""
entity = entities[0]
project_name = entity["project"]["full_name"]
database = pypelib.get_avalon_database()
asset_name = entity["parent"]["name"]
asset_document = database[project_name].find_one({
    "type": "asset",
    "name": asset_name
})
hierarchy = ""
asset_doc_parents = asset_document["data"].get("parents")
if len(asset_doc_parents) > 0:
    hierarchy = os.path.join(*asset_doc_parents)
application = avalon.lib.get_application(self.identifier)
data = {
    "project": {
        "name": entity["project"]["full_name"],
        "code": entity["project"]["name"]
    },
    "task": entity["name"],
    "asset": asset_name,
    "app": application["application_dir"],
    "hierarchy": hierarchy
}
av_project = database[project_name].find_one({"type": 'project'})
templates = None
if av_project:
work_template = av_project.get('config', {}).get('template', {}).get(
'work', None
)
work_template = None
try:
work_template = work_template.format(**data)
except Exception:
try:
anatomy = anatomy.format(data)
work_template = anatomy["work"]["folder"]
anatomy = Anatomy(project_name)
anatomy_filled = anatomy.format(data)
workdir = os.path.normpath(anatomy_filled["work"]["folder"])
except Exception as exc:
msg = "{} Error in anatomy.format: {}".format(
__name__, str(exc)
)
self.log.error(msg, exc_info=True)
return {
'success': False,
'message': msg
}
except Exception as exc:
msg = "Error in anatomy.format: {}".format(
str(exc)
)
self.log.error(msg, exc_info=True)
return {
"success": False,
"message": msg
}
workdir = os.path.normpath(work_template)
os.environ["AVALON_WORKDIR"] = workdir
try:
os.makedirs(workdir)
except FileExistsError:
pass
# set environments for Avalon
prep_env = copy.deepcopy(os.environ)
prep_env.update({
"AVALON_PROJECT": project_name,
"AVALON_ASSET": asset_name,
"AVALON_TASK": entity["name"],
"AVALON_APP": self.identifier.split("_")[0],
"AVALON_APP_NAME": self.identifier,
"AVALON_HIERARCHY": hierarchy,
"AVALON_WORKDIR": workdir
})
prep_env.update(anatomy.roots_obj.root_environments())
tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
tools_env = asset_document["data"].get("tools_env") or []
tools_attr.extend(tools_env)
tools_env = acre.get_tools(tools_attr)
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(prep_env))
env = acre.append(dict(prep_env), env)
# Get path to execute
st_temp_path = os.environ["PYPE_CONFIG"]
os_plat = platform.system().lower()
# Path to folder with launchers
path = os.path.join(st_temp_path, "launchers", os_plat)
# Full path to executable launcher
execfile = None
@ -266,94 +240,92 @@ class AppAction(BaseAction):
}
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
fpath = os.path.join(path.strip('"'), self.executable + ext)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
execfile = fpath
break
pass
# Run SW if was found executable
if execfile is not None:
# Store subprocess to varaible. This is due to Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# remove `_popen` variable.
_popen = avalon.lib.launch(
executable=execfile, args=[], environment=env
)
else:
if execfile is None:
return {
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
"success": False,
"message": "We didn't find launcher for {0}".format(
self.label
)
}
if sys.platform.startswith('linux'):
popen = avalon.lib.launch(
executable=execfile, args=[], environment=env
)
elif (sys.platform.startswith("linux")
or sys.platform.startswith("darwin")):
execfile = os.path.join(path.strip('"'), self.executable)
if os.path.isfile(execfile):
try:
fp = open(execfile)
except PermissionError as p:
self.log.exception('Access denied on {0} - {1}'.format(
execfile, p))
return {
'success': False,
'message': "Access denied on launcher - {}".format(
execfile)
}
fp.close()
# check executable permission
if not os.access(execfile, os.X_OK):
self.log.error('No executable permission on {}'.format(
execfile))
return {
'success': False,
'message': "No executable permission - {}".format(
execfile)
}
if not os.path.isfile(execfile):
msg = "Launcher doesn't exist - {}".format(execfile)
else:
self.log.error('Launcher doesn\'t exist - {}'.format(
execfile))
self.log.error(msg)
return {
'success': False,
'message': "Launcher doesn't exist - {}".format(execfile)
"success": False,
"message": msg
}
try:
fp = open(execfile)
except PermissionError as perm_exc:
msg = "Access denied on launcher {} - {}".format(
execfile, perm_exc
)
self.log.exception(msg, exc_info=True)
return {
"success": False,
"message": msg
}
fp.close()
# check executable permission
if not os.access(execfile, os.X_OK):
msg = "No executable permission - {}".format(execfile)
self.log.error(msg)
return {
"success": False,
"message": msg
}
# Run SW if was found executable
if execfile is not None:
# Store subprocess to varaible. This is due to Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# remove `_popen` variable.
_popen = avalon.lib.launch(
'/usr/bin/env', args=['bash', execfile], environment=env
)
else:
if execfile is None:
return {
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
}
"success": False,
"message": "We didn't found launcher for {0}".format(
self.label
)
}
popen = avalon.lib.launch( # noqa: F841
"/usr/bin/env", args=["bash", execfile], environment=env
)
# Change status of task to In progress
presets = config.get_presets()["ftrack"]["ftrack_config"]
if "status_update" in presets:
    statuses = presets["status_update"]
    actual_status = entity["status"]["name"].lower()
    already_tested = []
    ent_path = "/".join(
        [ent["name"] for ent in entity["link"]]
    )
    while True:
        next_status_name = None
        for key, value in statuses.items():
            if key in already_tested:
                continue
            if actual_status in value or "_any_" in value:
                if key != "_ignore_":
                    next_status_name = key
                    already_tested.append(key)
break
@ -363,12 +335,12 @@ class AppAction(BaseAction):
break
try:
query = 'Status where name is "{}"'.format(
query = "Status where name is \"{}\"".format(
next_status_name
)
status = session.query(query).one()
entity['status'] = status
entity["status"] = status
session.commit()
self.log.debug("Changing status to \"{}\" <{}>".format(
next_status_name, ent_path
@ -378,18 +350,12 @@ class AppAction(BaseAction):
except Exception:
session.rollback()
msg = (
'Status "{}" in presets wasn\'t found'
' on Ftrack entity type "{}"'
"Status \"{}\" in presets wasn't found"
" on Ftrack entity type \"{}\""
).format(next_status_name, entity.entity_type)
self.log.warning(msg)
return {
    "success": True,
    "message": "Launching {0}".format(self.label)
}

View file

@ -29,7 +29,7 @@ class Login_Dialog_ui(QtWidgets.QWidget):
elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'):
self.setWindowIcon(self.parent.parent.icon)
else:
pype_setup = os.getenv('PYPE_SETUP_PATH')
items = [pype_setup, "app", "resources", "icon.png"]
fname = os.path.sep.join(items)
icon = QtGui.QIcon(fname)

View file

@ -693,7 +693,7 @@ def execute_hook(hook, *args, **kwargs):
This will load hook file, instantiate class and call `execute` method
on it. Hook must be in a form:
`$PYPE_SETUP_PATH/repos/pype/path/to/hook.py/HookClass`
This will load `hook.py`, instantiate HookClass and then call
`execute(*args, **kwargs)`
@ -704,7 +704,7 @@ def execute_hook(hook, *args, **kwargs):
class_name = hook.split("/")[-1]
abspath = os.path.join(os.getenv('PYPE_SETUP_PATH'),
'repos', 'pype', *hook.split("/")[:-1])
mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
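As a hedged illustration of the hook form described above, a hook module addressed as `my/hooks/on_publish.py/OnPublishHook` could look like this (the path and class name are hypothetical, not part of the repository):

# repos/pype/my/hooks/on_publish.py (hypothetical)
class OnPublishHook:
    """Hook class instantiated and executed by execute_hook()."""
    def execute(self, *args, **kwargs):
        # A real hook would do its work here; the return value
        # is passed back to the caller of execute_hook().
        print("hook executed with", args, kwargs)
        return True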
@ -1361,3 +1361,25 @@ class BuildWorkfile:
)
return output
def ffprobe_streams(path_to_file):
"""Load streams from entered filepath via ffprobe."""
log.info(
"Getting information about input \"{}\".".format(path_to_file)
)
args = [
get_ffmpeg_tool_path("ffprobe"),
"-v quiet",
"-print_format json",
"-show_format",
"-show_streams",
"\"{}\"".format(path_to_file)
]
command = " ".join(args)
log.debug("FFprobe command: \"{}\"".format(command))
popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
popen_output = popen.communicate()[0]
log.debug("FFprobe output: {}".format(popen_output))
return json.loads(popen_output)["streams"]

View file

@ -397,7 +397,7 @@ class LogDetailWidget(QtWidgets.QWidget):
layout = QtWidgets.QVBoxLayout(self)
label = QtWidgets.QLabel("Detail")
detail_widget = QtWidgets.QTextEdit()
detail_widget.setReadOnly(True)
layout.addWidget(label)
layout.addWidget(detail_widget)
@ -420,66 +420,3 @@ class LogDetailWidget(QtWidgets.QWidget):
self.detail_widget.setHtml(self.html_text.format(**data))
class LogDetailTextEdit(QtWidgets.QTextEdit):
"""QTextEdit that displays version specific information.
This also overrides the context menu to add actions like copying
source path to clipboard or copying the raw data of the version
to clipboard.
"""
def __init__(self, parent=None):
super(LogDetailTextEdit, self).__init__(parent=parent)
# self.data = {
# "source": None,
# "raw": None
# }
#
# def contextMenuEvent(self, event):
# """Context menu with additional actions"""
# menu = self.createStandardContextMenu()
#
# # Add additional actions when any text so we can assume
# # the version is set.
# if self.toPlainText().strip():
#
# menu.addSeparator()
# action = QtWidgets.QAction("Copy source path to clipboard",
# menu)
# action.triggered.connect(self.on_copy_source)
# menu.addAction(action)
#
# action = QtWidgets.QAction("Copy raw data to clipboard",
# menu)
# action.triggered.connect(self.on_copy_raw)
# menu.addAction(action)
#
# menu.exec_(event.globalPos())
# del menu
#
# def on_copy_source(self):
# """Copy formatted source path to clipboard"""
# source = self.data.get("source", None)
# if not source:
# return
#
# # path = source.format(root=api.registered_root())
# # clipboard = QtWidgets.QApplication.clipboard()
# # clipboard.setText(path)
#
# def on_copy_raw(self):
# """Copy raw version data to clipboard
#
# The data is string formatted with `pprint.pformat`.
#
# """
# raw = self.data.get("raw", None)
# if not raw:
# return
#
# raw_text = pprint.pformat(raw)
# clipboard = QtWidgets.QApplication.clipboard()
# clipboard.setText(raw_text)

View file

@ -23,7 +23,7 @@ class MusterLogin(QtWidgets.QWidget):
elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'):
self.setWindowIcon(parent.parent.icon)
else:
pype_setup = os.getenv('PYPE_SETUP_PATH')
items = [pype_setup, "app", "resources", "icon.png"]
fname = os.path.sep.join(items)
icon = QtGui.QIcon(fname)

View file

@ -61,7 +61,6 @@ def reload_config():
reload(module)
def install():
''' Installing all requirements for Nuke host
'''
@ -72,6 +71,9 @@ def install():
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# Register Avalon event for workfiles loading.
avalon.on("workio.open_file", lib.check_inventory_versions)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
workfile_settings = lib.WorkfileSettings()
# Disable all families except for the ones we explicitly want to see

View file

@ -177,9 +177,16 @@ def format_anatomy(data):
log.debug("__ anatomy.templates: {}".format(anatomy.templates))
try:
    # TODO: backward compatibility with old anatomy template
    padding = int(
        anatomy.templates["render"].get(
            "frame_padding",
            anatomy.templates["render"].get("padding")
        )
    )
except KeyError as e:
    msg = (
        "`padding` or `frame_padding` key is not available in "
        "`render` Anatomy template. Please add it there and restart "
        "the pipeline (padding: \"4\"): `{}`"
    ).format(e)
@ -192,7 +199,6 @@ def format_anatomy(data):
data["version"] = pype.get_version_from_path(file)
project_document = pype.get_project()
data.update({
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": api.Session["AVALON_TASK"],
@ -313,7 +319,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
if input:
# if connected input node was defined
connections.append({
"node": input,
"node": input,
"inputName": input.name()})
prev_node = nuke.createNode(
"Input", "name {}".format(input.name()))
@ -369,7 +375,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
write_node = now_node = avalon.nuke.lib.add_write_node(
"inside_{}".format(name),
**_data
)
# connect to previous node
now_node.setInput(0, prev_node)
@ -393,11 +399,13 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
if review:
add_review_knob(GN)
# Add linked knobs.
linked_knob_names = ["Render", "use_limit", "first", "last"]
for name in linked_knob_names:
    link = nuke.Link_Knob(name)
    link.makeLink(write_node.name(), name)
    link.setName(name)
    GN.addKnob(link)
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -408,7 +416,6 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
# Deadline tab.
add_deadline_tab(GN)
# set tile color
tile_color = _data.get("tile_color", "0xff0000ff")
GN["tile_color"].setValue(tile_color)
@ -436,6 +443,7 @@ def add_rendering_knobs(node):
node.addKnob(knob)
return node
def add_review_knob(node):
''' Adds additional review knob to given node
@ -645,8 +653,9 @@ class WorkfileSettings(object):
if root_dict.get("customOCIOConfigPath"):
self._root_node["customOCIOConfigPath"].setValue(
str(root_dict["customOCIOConfigPath"]).format(
    **os.environ
).replace("\\", "/")
)
log.debug("nuke.root()['{}'] changed to: {}".format(
"customOCIOConfigPath", root_dict["customOCIOConfigPath"]))
root_dict.pop("customOCIOConfigPath")
@ -750,10 +759,9 @@ class WorkfileSettings(object):
if changes:
msg = "Read nodes are not set to correct colospace:\n\n"
for nname, knobs in changes.items():
msg += str(" - node: '{0}' is now '{1}' "
"but should be '{2}'\n").format(
nname, knobs["from"], knobs["to"]
)
msg += str(
" - node: '{0}' is now '{1}' but should be '{2}'\n"
).format(nname, knobs["from"], knobs["to"])
msg += "\nWould you like to change it?"
@ -972,7 +980,9 @@ class WorkfileSettings(object):
self.set_colorspace()
def set_favorites(self):
projects_root = os.getenv("AVALON_PROJECTS")
anatomy = get_anatomy()
work_template = anatomy.templates["work"]["path"]
projects_root = anatomy.root_value_for_template(work_template)
work_dir = os.getenv("AVALON_WORKDIR")
asset = os.getenv("AVALON_ASSET")
project = os.getenv("AVALON_PROJECT")
@ -1111,15 +1121,15 @@ class BuildWorkfile(WorkfileSettings):
self.to_script = to_script
# collect data for formating
self.data_tmp = {
"root": root_path or api.Session["AVALON_PROJECTS"],
"project": {"name": self._project["name"],
"code": self._project["data"].get("code", '')},
"code": self._project["data"].get("code", "")},
"asset": self._asset or os.environ["AVALON_ASSET"],
"task": kwargs.get("task") or api.Session["AVALON_TASK"],
"hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
"version": kwargs.get("version", {}).get("name", 1),
"user": getpass.getuser(),
"comment": "firstBuild"
"comment": "firstBuild",
"ext": "nk"
}
# get presets from anatomy
@ -1128,8 +1138,8 @@ class BuildWorkfile(WorkfileSettings):
anatomy_filled = anatomy.format(self.data_tmp)
# get dir and file for workfile
self.work_dir = anatomy_filled["avalon"]["work"]
self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk"
self.work_dir = anatomy_filled["work"]["folder"]
self.work_file = anatomy_filled["work"]["file"]
def save_script_as(self, path=None):
# first clear anything in open window
@ -1412,7 +1422,6 @@ class ExporterReview:
'ext': self.ext,
'files': self.file,
"stagingDir": self.staging_dir,
"anatomy_template": "render",
"tags": [self.name.replace("_", "-")] + add_tags
}
@ -1420,7 +1429,7 @@ class ExporterReview:
repre.update({
"frameStart": self.first_frame,
"frameEnd": self.last_frame,
})
self.data["representations"].append(repre)
@ -1655,11 +1664,12 @@ class ExporterReviewMov(ExporterReview):
if not self.viewer_lut_raw:
colorspaces = [
self.bake_colorspace_main, self.bake_colorspace_fallback
]
if any(colorspaces):
# OCIOColorSpace with controled output
dag_node = nuke.createNode("OCIOColorSpace")
self._temp_nodes.append(dag_node)
for c in colorspaces:
test = dag_node["out_colorspace"].setValue(str(c))
if test:
@ -1709,7 +1719,7 @@ class ExporterReviewMov(ExporterReview):
self.get_representation_data(
tags=["review", "delete"],
range=True
)
self.log.debug("Representation... `{}`".format(self.data))
@ -1744,14 +1754,14 @@ def get_dependent_nodes(nodes):
if test_in:
connections_in.update({
node: test_in
})
# collect all outputs outside
test_out = [i for i in outputs if i.name() not in node_names]
if test_out:
# only one dependent node is allowed
connections_out.update({
node: test_out[-1]
})
return connections_in, connections_out

View file

@ -6,7 +6,7 @@ import pyblish.api
import avalon.api as avalon
from avalon.vendor.Qt import (QtWidgets, QtGui)
import pype.api as pype
from pypeapp import Logger, Anatomy
log = Logger().get_logger(__name__, "nukestudio")
@ -30,12 +30,17 @@ def set_workfiles():
# show workfile gui
workfiles.show(workdir)
def sync_avalon_data_to_workfile():
project_name = avalon.Session["AVALON_PROJECT"]
anatomy = Anatomy(project_name)
work_template = anatomy.templates["work"]["path"]
work_root = anatomy.root_value_for_template(work_template)
active_project_root = (
    os.path.join(work_root, project_name)
).replace("\\", "/")
# getting project
project = hiero.core.projects()[-1]
@ -350,17 +355,19 @@ def CreateNukeWorkfile(nodes=None,
# create root node and save all metadata
root_node = hiero.core.nuke.RootNode()
root_path = os.environ["AVALON_PROJECTS"]
anatomy = Anatomy(os.environ["AVALON_PROJECT"])
work_template = anatomy.templates["work"]["path"]
root_path = anatomy.root_value_for_template(work_template)
nuke_script.addNode(root_node)
# here to call pype.nuke.lib.BuildWorkfile
script_builder = nklib.BuildWorkfile(
    root_node=root_node,
    root_path=root_path,
    nodes=nuke_script.getNodes(),
    **kwargs
)
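Both changed blocks above derive the filesystem root from the project Anatomy instead of the removed AVALON_PROJECTS variable. A hedged, self-contained sketch of that pattern (the project name is hypothetical):

from pypeapp import Anatomy

anatomy = Anatomy("my_project")  # hypothetical project name
work_template = anatomy.templates["work"]["path"]
# Root value that the "work" template is anchored to, e.g. "P:/projects".
projects_root = anatomy.root_value_for_template(work_template)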
class ClipLoader:

View file

@ -39,10 +39,8 @@ class CollectContextDataFromAport(pyblish.api.ContextPlugin):
# get avalon session data and convert \ to /
_S = avalon.session
projects = Path(_S["AVALON_PROJECTS"]).resolve()
asset = _S["AVALON_ASSET"]
workdir = Path(_S["AVALON_WORKDIR"]).resolve()
_S["AVALON_PROJECTS"] = str(projects)
_S["AVALON_WORKDIR"] = str(workdir)
context.data["avalonSession"] = _S

View file

@ -22,7 +22,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
ftrack_log.setLevel(logging.WARNING)
# Collect session
session = ftrack_api.Session(auto_connect_event_hub=True)
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
context.data["ftrackSession"] = session

View file

@ -63,7 +63,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"name": "thumbnail" # Default component name is "main".
}
comp['thumbnail'] = True
elif comp.get('ftrackreview') or ("ftrackreview" in comp.get('tags', [])):
'''
Ftrack bug requirement:
- Start frame must be 0

View file

@ -68,6 +68,9 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
# Top-level group name
"BatchName": filename,
# Asset dependency to wait for at least the scene file to sync.
"AssetDependency0": filepath,
# Job name, as seen in Monitor
"Name": filename,

View file

@ -1,13 +1,14 @@
"""Collect Anatomy and global anatomy data.
"""Collect global context Anatomy data.
Requires:
context -> anatomy
context -> projectEntity
context -> assetEntity
context -> username
context -> datetimeData
session -> AVALON_TASK
projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
Provides:
context -> anatomyData
"""
@ -15,45 +16,51 @@ import os
import json
from avalon import api, lib
import pyblish.api
class CollectAnatomyContextData(pyblish.api.ContextPlugin):
"""Collect Anatomy Context data.
Example:
context.data["anatomyData"] = {
"project": {
"name": "MyProject",
"code": "myproj"
},
"asset": "AssetName",
"hierarchy": "path/to/asset",
"task": "Working",
"username": "MeDespicable",
*** OPTIONAL ***
"app": "maya" # Current application base name
+ multiple keys from `datetimeData` # see its collector
}
"""
order = pyblish.api.CollectorOrder + 0.002
label = "Collect Anatomy"
label = "Collect Anatomy Context Data"
def process(self, context):
root_path = api.registered_root()
task_name = api.Session["AVALON_TASK"]
project_entity = context.data["projectEntity"]
asset_entity = context.data["assetEntity"]
project_name = project_entity["name"]
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
hierarchy_items = asset_entity["data"]["parents"]
hierarchy = ""
if hierarchy_items:
hierarchy = os.path.join(*hierarchy_items)
context_data = {
"root": root_path,
"project": {
"name": project_name,
"name": project_entity["name"],
"code": project_entity["data"].get("code")
},
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
"username": context.data["user"]
}

View file

@ -28,11 +28,11 @@ from avalon import io
import pyblish.api
class CollectAnatomyInstanceData(pyblish.api.InstancePlugin):
    """Collect Instance specific Anatomy data."""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect instance anatomy data"
label = "Collect Anatomy Instance data"
def process(self, instance):
# get all the stuff from the database

View file

@ -0,0 +1,32 @@
"""Collect Anatomy object.
Requires:
os.environ -> AVALON_PROJECT
Provides:
context -> anatomy (pypeapp.Anatomy)
"""
import os
from pypeapp import Anatomy
import pyblish.api
class CollectAnatomyObject(pyblish.api.ContextPlugin):
"""Collect Anatomy object into Context"""
order = pyblish.api.CollectorOrder - 0.4
label = "Collect Anatomy Object"
def process(self, context):
project_name = os.environ.get("AVALON_PROJECT")
if project_name is None:
raise AssertionError(
"Environment `AVALON_PROJECT` is not set."
"Could not initialize project's Anatomy."
)
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
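Since this collector only needs the environment, it can be exercised on its own; a rough sketch, assuming pyblish is importable and the project exists in the studio database:

import os
import pyblish.api
import pyblish.util

os.environ["AVALON_PROJECT"] = "my_project"  # hypothetical project
pyblish.api.register_plugin(CollectAnatomyObject)
context = pyblish.util.collect()
print(context.data["anatomy"].roots)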

View file

@ -15,7 +15,7 @@ import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Avalon Entities"
def process(self, context):
@ -51,10 +51,26 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
context.data["frameStart"] = data.get("frameStart")
context.data["frameEnd"] = data.get("frameEnd")
handles = int(data.get("handles") or 0)
context.data["handles"] = handles
context.data["handleStart"] = int(data.get("handleStart", handles))
context.data["handleEnd"] = int(data.get("handleEnd", handles))
handles = data.get("handles") or 0
handle_start = data.get("handleStart")
if handle_start is None:
handle_start = handles
self.log.info((
"Key \"handleStart\" is not set."
" Using value from \"handles\" key {}."
).format(handle_start))
handle_end = data.get("handleEnd")
if handle_end is None:
handle_end = handles
self.log.info((
"Key \"handleEnd\" is not set."
" Using value from \"handles\" key {}."
).format(handle_end))
context.data["handles"] = int(handles)
context.data["handleStart"] = int(handle_start)
context.data["handleEnd"] = int(handle_end)
frame_start_h = data.get("frameStart") - context.data["handleStart"]
frame_end_h = data.get("frameEnd") + context.data["handleEnd"]

View file

@ -1,64 +0,0 @@
"""
Requires:
environment -> DEADLINE_PATH
Provides:
context -> deadlineUser (str)
"""
import os
import subprocess
import pyblish.api
from pype.plugin import contextplugin_should_run
CREATE_NO_WINDOW = 0x08000000
def deadline_command(cmd):
# Find Deadline
path = os.environ.get("DEADLINE_PATH", None)
assert path is not None, "Variable 'DEADLINE_PATH' must be set"
executable = os.path.join(path, "deadlinecommand")
if os.name == "nt":
executable += ".exe"
assert os.path.exists(
executable), "Deadline executable not found at %s" % executable
assert cmd, "Must have a command"
query = (executable, cmd)
process = subprocess.Popen(query, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
creationflags=CREATE_NO_WINDOW)
out, err = process.communicate()
return out
class CollectDeadlineUser(pyblish.api.ContextPlugin):
"""Retrieve the local active Deadline user"""
order = pyblish.api.CollectorOrder + 0.499
label = "Deadline User"
hosts = ['maya', 'fusion']
families = ["renderlayer", "saver.deadline"]
def process(self, context):
"""Inject the current working file"""
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
user = deadline_command("GetCurrentUserName").strip()
if not user:
self.log.warning("No Deadline user found. "
"Do you have Deadline installed?")
return
self.log.info("Found Deadline user: {}".format(user))
context.data['deadlineUser'] = user

View file

@ -1,11 +1,18 @@
"""Loads publishing context from json and continues in publish process.
Requires:
anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11)
Provides:
context, instances -> All data from previous publishing process.
"""
import os
import json
import pyblish.api
from avalon import api
class CollectRenderedFiles(pyblish.api.ContextPlugin):
"""
@ -13,14 +20,17 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
`PYPE_PUBLISH_DATA`. Those files _MUST_ share same context.
"""
order = pyblish.api.CollectorOrder - 0.2
targets = ["filesequence"]
label = "Collect rendered frames"
_context = None
def _load_json(self, path):
assert os.path.isfile(path), ("path to json file doesn't exist")
path = path.strip('\"')
assert os.path.isfile(path), (
"Path to json file doesn't exist. \"{}\"".format(path)
)
data = None
with open(path, "r") as json_file:
try:
@ -32,7 +42,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
)
return data
def _fill_staging_dir(self, data_object, anatomy):
    staging_dir = data_object.get("stagingDir")
    if staging_dir:
        data_object["stagingDir"] = anatomy.fill_root(staging_dir)
def _process_path(self, data, anatomy):
# validate basic necessary data
data_err = "invalid json file - missing data"
required = ["asset", "user", "comment",
@ -66,14 +81,23 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]
# now we can just add instances from json file and we are done
for instance in data.get("instances"):
for instance_data in data.get("instances"):
self.log.info(" - processing instance for {}".format(
instance.get("subset")))
i = self._context.create_instance(instance.get("subset"))
self.log.info("remapping paths ...")
i.data["representations"] = [PypeLauncher().path_remapper(
data=r) for r in instance.get("representations")]
i.data.update(instance)
instance_data.get("subset")))
instance = self._context.create_instance(
instance_data.get("subset")
)
self.log.info("Filling stagignDir...")
self._fill_staging_dir(instance_data, anatomy)
instance.data.update(instance_data)
representations = []
for repre_data in instance_data.get("representations") or []:
self._fill_staging_dir(repre_data, anatomy)
representations.append(repre_data)
instance.data["representations"] = representations
# add audio if in metadata data
if data.get("audio"):
@ -92,13 +116,39 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
"Missing `PYPE_PUBLISH_DATA`")
paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)
project_name = os.environ.get("AVALON_PROJECT")
if project_name is None:
    raise AssertionError(
        "Environment `AVALON_PROJECT` was not found. "
        "Could not set project `root` which may cause issues."
)
# TODO root filling should happen after collect Anatomy
self.log.info("Getting root setting for project \"{}\"".format(
project_name
))
anatomy = context.data["anatomy"]
self.log.info("anatomy: {}".format(anatomy.roots))
try:
session_is_set = False
for path in paths:
path = anatomy.fill_root(path)
data = self._load_json(path)
assert data, "failed to load json file"
if not session_is_set:
session_data = data["session"]
remapped = anatomy.roots_obj.path_remapper(
session_data["AVALON_WORKDIR"]
)
if remapped:
session_data["AVALON_WORKDIR"] = remapped
self.log.info("Setting session using data from file")
api.Session.update(session_data)
os.environ.update(session_data)
session_is_set = True
self._process_path(data, anatomy)
except Exception as e:
self.log.error(e, exc_info=True)
raise Exception("Error") from e

View file

@ -1,4 +1,5 @@
import os
import re
import json
import copy
@ -18,13 +19,774 @@ class ExtractBurnin(pype.api.Extractor):
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = ["nuke", "maya", "shell", "premiere"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
optional = True
positions = [
"top_left", "top_centered", "top_right",
"bottom_right", "bottom_centered", "bottom_left"
]
# Default options for burnins for cases that are not set in presets.
default_options = {
"opacity": 1,
"x_offset": 5,
"y_offset": 5,
"bg_padding": 5,
"bg_opacity": 0.5,
"font_size": 42
}
# Preset attributes
profiles = None
options = None
fields = None
def process(self, instance):
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
or instance.data.get("name")
)
self.log.info((
"Instance \"{}\" contain \"multipartExr\". Skipped."
).format(instance_label))
return
# QUESTION what is this for and should we raise an exception?
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
if self.profiles is None:
return self.legacy_process(instance)
self.main_process(instance)
# Remove any representations tagged for deletion.
# QUESTION Is it possible to have a representation with "delete" tag?
for repre in tuple(instance.data["representations"]):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
instance.data["representations"].remove(repre)
self.log.debug(instance.data["representations"])
def main_process(self, instance):
# TODO get these data from context
host_name = os.environ["AVALON_APP"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
# Find profile most matching current host, task and instance family
profile = self.find_matching_profile(host_name, task_name, family)
if not profile:
self.log.info((
"Skipped instance. None of profiles in presets are for"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\""
).format(host_name, family, task_name))
return
# Pre-filter burnin definitions by instance families
burnin_defs = self.filter_burnins_by_families(profile, instance)
if not burnin_defs:
self.log.info((
"Skipped instance. Burnin definitions are not set for profile"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\" | Profile \"{}\""
).format(host_name, family, task_name, profile))
return
# Prepare burnin options
profile_options = copy.deepcopy(self.default_options)
for key, value in (self.options or {}).items():
if value is not None:
profile_options[key] = value
# Prepare global burnin values from presets
profile_burnins = {}
for key, value in (self.fields or {}).items():
key_low = key.lower()
if key_low in self.positions:
if value is not None:
profile_burnins[key_low] = value
# Prepare basic data for processing
_burnin_data, _temp_data = self.prepare_basic_data(instance)
anatomy = instance.context.data["anatomy"]
scriptpath = self.burnin_script_path()
executable = self.python_executable_path()
for idx, repre in enumerate(tuple(instance.data["representations"])):
self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
if not self.repres_is_valid(repre):
continue
# Filter output definition by representation tags (optional)
repre_burnin_defs = self.filter_burnins_by_tags(
burnin_defs, repre["tags"]
)
if not repre_burnin_defs:
self.log.info((
"Skipped representation. All burnin definitions from"
" selected profile does not match to representation's"
" tags. \"{}\""
).format(str(repre["tags"])))
continue
# Create copy of `_burnin_data` and `_temp_data` for repre.
burnin_data = copy.deepcopy(_burnin_data)
temp_data = copy.deepcopy(_temp_data)
# Prepare representation based data.
self.prepare_repre_data(instance, repre, burnin_data, temp_data)
# Add anatomy keys to burnin_data.
filled_anatomy = anatomy.format_all(burnin_data)
burnin_data["anatomy"] = filled_anatomy.get_solved()
first_output = True
files_to_delete = []
for filename_suffix, burnin_def in repre_burnin_defs.items():
new_repre = copy.deepcopy(repre)
# Keep "ftrackreview" tag only on first output
if first_output:
first_output = False
elif "ftrackreview" in new_repre["tags"]:
new_repre["tags"].remove("ftrackreview")
burnin_options = copy.deepcopy(profile_options)
burnin_values = copy.deepcopy(profile_burnins)
# Options overrides
for key, value in (burnin_def.get("options") or {}).items():
# Set or override value if is valid
if value is not None:
burnin_options[key] = value
# Burnin values overrides
for key, value in burnin_def.items():
key_low = key.lower()
if key_low in self.positions:
if value is not None:
# Set or override value if is valid
burnin_values[key_low] = value
elif key_low in burnin_values:
# Pop key if value is set to None (null in json)
burnin_values.pop(key_low)
# Remove "delete" tag from new representation
if "delete" in new_repre["tags"]:
new_repre["tags"].remove("delete")
# Update name and outputName to be able have multiple outputs
# Join previous "outputName" with filename suffix
new_name = "_".join([new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)
# Data for burnin script
script_data = {
"input": temp_data["full_input_path"],
"output": temp_data["full_output_path"],
"burnin_data": burnin_data,
"options": burnin_options,
"values": burnin_values
}
self.log.debug(
"script_data: {}".format(json.dumps(script_data, indent=4))
)
# Dump data to string
dumped_script_data = json.dumps(script_data)
# Prepare subprocess arguments
args = [executable, scriptpath, dumped_script_data]
self.log.debug("Executing: {}".format(args))
# Run burnin script
output = pype.api.subprocess(args)
self.log.debug("Output: {}".format(output))
for filepath in temp_data["full_input_paths"]:
filepath = filepath.replace("\\", "/")
if filepath not in files_to_delete:
files_to_delete.append(filepath)
# Add new representation to instance
instance.data["representations"].append(new_repre)
# Remove source representation
# NOTE we maybe can keep source representation if necessary
instance.data["representations"].remove(repre)
# Delete input files
for filepath in files_to_delete:
if os.path.exists(filepath):
os.remove(filepath)
self.log.debug("Removed: \"{}\"".format(filepath))
def prepare_basic_data(self, instance):
"""Pick data from instance for processing and for burnin strings.
Args:
instance (Instance): Currently processed instance.
Returns:
tuple: `(burnin_data, temp_data)` - `burnin_data` contain data for
filling burnin strings. `temp_data` are for repre pre-process
preparation.
"""
self.log.debug("Prepring basic data for burnins")
context = instance.context
version = instance.data.get("version")
if version is None:
version = context.data.get("version")
frame_start = instance.data.get("frameStart")
if frame_start is None:
self.log.warning(
"Key \"frameStart\" is not set. Setting to \"0\"."
)
frame_start = 0
frame_start = int(frame_start)
frame_end = instance.data.get("frameEnd")
if frame_end is None:
self.log.warning(
"Key \"frameEnd\" is not set. Setting to \"1\"."
)
frame_end = 1
frame_end = int(frame_end)
handles = instance.data.get("handles")
if handles is None:
handles = context.data.get("handles")
if handles is None:
handles = 0
handle_start = instance.data.get("handleStart")
if handle_start is None:
handle_start = context.data.get("handleStart")
if handle_start is None:
handle_start = handles
handle_end = instance.data.get("handleEnd")
if handle_end is None:
handle_end = context.data.get("handleEnd")
if handle_end is None:
handle_end = handles
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
burnin_data = copy.deepcopy(instance.data["anatomyData"])
if "slate.farm" in instance.data["families"]:
frame_start_handle += 1
burnin_data.update({
"version": int(version),
"comment": context.data.get("comment") or ""
})
intent_label = context.data.get("intent")
if intent_label and isinstance(intent_label, dict):
intent_label = intent_label.get("label")
if intent_label:
burnin_data["intent"] = intent_label
temp_data = {
"frame_start": frame_start,
"frame_end": frame_end,
"frame_start_handle": frame_start_handle,
"frame_end_handle": frame_end_handle
}
self.log.debug(
"Basic burnin_data: {}".format(json.dumps(burnin_data, indent=4))
)
return burnin_data, temp_data
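# A minimal worked example of the fallback chain above (all values assumed):
# instance data wins over context data, and the generic "handles" value is
# used only when the specific handle keys are missing.
#   instance: {"frameStart": 1001, "frameEnd": 1100}
#   context:  {"handles": 5, "handleStart": 10}
#   handle_start -> 10 (from context), handle_end -> 5 (fallback to handles)
#   frame_start_handle = 1001 - 10 = 991
#   frame_end_handle = 1100 + 5 = 1105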
def repres_is_valid(self, repre):
"""Validation if representaion should be processed.
Args:
repre (dict): Representation which should be checked.
Returns:
bool: False if can't be processed else True.
"""
if "burnin" not in (repre.get("tags") or []):
self.log.info((
"Representation \"{}\" don't have \"burnin\" tag. Skipped."
).format(repre["name"]))
return False
return True
def filter_burnins_by_tags(self, burnin_defs, tags):
"""Filter burnin definitions by entered representation tags.
Burnin definitions without a tags filter are marked as valid.
Args:
burnin_defs (dict): Burnin definitions from presets.
tags (list): Tags of processed representation.
Returns:
dict: Containing all burnin definitions matching entered tags.
"""
filtered_burnins = {}
repre_tags_low = [tag.lower() for tag in tags]
for filename_suffix, burnin_def in burnin_defs.items():
valid = True
output_filters = burnin_def.get("filter")
if output_filters:
# Check tag filters
tag_filters = output_filters.get("tags")
if tag_filters:
tag_filters_low = [tag.lower() for tag in tag_filters]
valid = False
for tag in repre_tags_low:
if tag in tag_filters_low:
valid = True
break
if not valid:
continue
if valid:
filtered_burnins[filename_suffix] = burnin_def
return filtered_burnins
def input_output_paths(self, new_repre, temp_data, filename_suffix):
"""Prepare input and output paths for representation.
Store data to `temp_data` under these keys: "full_input_path" - full
path to source files, optionally with sequence formatting;
"full_output_path" - full path to output, optionally with sequence
formatting; "full_input_paths" - list of all source files which will be
deleted when the burnin script ends; "repre_files" - list of output
filenames.
Args:
new_repre (dict): Currently processed new representation.
temp_data (dict): Temp data of representation process.
filename_suffix (str): Filename suffix added to input filename.
Returns:
None: This is a processing method.
"""
# TODO we should find better way to know if input is sequence
is_sequence = (
"sequence" in new_repre["tags"]
and isinstance(new_repre["files"], (tuple, list))
)
if is_sequence:
input_filename = new_repre["sequence_file"]
else:
input_filename = new_repre["files"]
filepart_start, ext = os.path.splitext(input_filename)
dir_path, basename = os.path.split(filepart_start)
if is_sequence:
# NOTE modified to keep name when multiple dots are in name
basename_parts = basename.split(".")
frame_part = basename_parts.pop(-1)
basename_start = ".".join(basename_parts) + filename_suffix
new_basename = ".".join((basename_start, frame_part))
output_filename = new_basename + ext
else:
output_filename = basename + filename_suffix + ext
if dir_path:
output_filename = os.path.join(dir_path, output_filename)
stagingdir = new_repre["stagingDir"]
full_input_path = os.path.join(
os.path.normpath(stagingdir), input_filename
).replace("\\", "/")
full_output_path = os.path.join(
os.path.normpath(stagingdir), output_filename
).replace("\\", "/")
temp_data["full_input_path"] = full_input_path
temp_data["full_output_path"] = full_output_path
self.log.debug("full_input_path: {}".format(full_input_path))
self.log.debug("full_output_path: {}".format(full_output_path))
# Prepare full paths to input files and filenames for representation
full_input_paths = []
if is_sequence:
repre_files = []
for frame_index in range(1, temp_data["duration"] + 1):
repre_files.append(output_filename % frame_index)
full_input_paths.append(full_input_path % frame_index)
else:
full_input_paths.append(full_input_path)
repre_files = output_filename
temp_data["full_input_paths"] = full_input_paths
new_repre["files"] = repre_files
def prepare_repre_data(self, instance, repre, burnin_data, temp_data):
"""Prepare data for representation.
Args:
instance (Instance): Currently processed Instance.
repre (dict): Currently processed representation.
burnin_data (dict): Copy of basic burnin data based on instance
data.
temp_data (dict): Copy of basic temp data
"""
# Add representation name to burnin data
burnin_data["representation"] = repre["name"]
# no handles switch from profile tags
if "no-handles" in repre["tags"]:
burnin_frame_start = temp_data["frame_start"]
burnin_frame_end = temp_data["frame_end"]
else:
burnin_frame_start = temp_data["frame_start_handle"]
burnin_frame_end = temp_data["frame_end_handle"]
burnin_duration = burnin_frame_end - burnin_frame_start + 1
burnin_data.update({
"frame_start": burnin_frame_start,
"frame_end": burnin_frame_end,
"duration": burnin_duration,
})
temp_data["duration"] = burnin_duration
# Add values for slate frames
burnin_slate_frame_start = burnin_frame_start
# Move frame start by 1 frame when slate is used.
if (
"slate" in instance.data["families"]
and "slate-frame" in repre["tags"]
):
burnin_slate_frame_start -= 1
self.log.debug("burnin_slate_frame_start: {}".format(
burnin_slate_frame_start
))
burnin_data.update({
"slate_frame_start": burnin_slate_frame_start,
"slate_frame_end": burnin_frame_end,
"slate_duration": (
burnin_frame_end - burnin_slate_frame_start + 1
)
})
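# A short numeric illustration of the slate shift above (frame values assumed):
#   burnin_frame_start = 991, burnin_frame_end = 1105 -> duration = 115
#   with "slate" family and "slate-frame" tag:
#   slate_frame_start = 990, slate_frame_end = 1105 -> slate_duration = 116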
def find_matching_profile(self, host_name, task_name, family):
""" Filter profiles by Host name, Task name and main Family.
Filtering keys are "hosts" (list), "tasks" (list), "families" (list).
If key is not find or is empty than it's expected to match.
Args:
profiles (list): Profiles definition from presets.
host_name (str): Current running host name.
task_name (str): Current context task name.
family (str): Main family of current Instance.
Returns:
dict/None: Return most matching profile or None if none of profiles
match at least one criteria.
"""
matching_profiles = None
highest_points = -1
for profile in self.profiles or tuple():
profile_points = 0
profile_value = []
# Host filtering
host_names = profile.get("hosts")
match = self.validate_value_by_regexes(host_name, host_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Task filtering
task_names = profile.get("tasks")
match = self.validate_value_by_regexes(task_name, task_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Family filtering
families = profile.get("families")
match = self.validate_value_by_regexes(family, families)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
if profile_points > highest_points:
matching_profiles = []
highest_points = profile_points
if profile_points == highest_points:
profile["__value__"] = profile_value
matching_profiles.append(profile)
if not matching_profiles:
return
if len(matching_profiles) == 1:
return matching_profiles[0]
return self.profile_exclusion(matching_profiles)
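# A hedged sketch of the scoring above: each filter either rejects the profile
# (match == -1), adds one point when its regex list matched (match == 1), or is
# neutral when the list is empty (match == 0). Profile data is hypothetical.
#
# profiles = [
#     {"hosts": ["maya"], "tasks": [], "families": ["render"]},  # 2 points
#     {"hosts": ["maya"], "tasks": ["compositing"], "families": []},  # rejected
#     {"hosts": [], "tasks": [], "families": []},  # 0 points
# ]
# With host_name="maya", task_name="animation", family="render", the first
# profile collects the highest score and wins.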
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile by host, task and family match.
Profiles are selectivelly filtered. Each profile should have
"__value__" key with list of booleans. Each boolean represents
existence of filter for specific key (host, taks, family).
Profiles are looped in sequence. In each sequence are split into
true_list and false_list. For next sequence loop are used profiles in
true_list if there are any profiles else false_list is used.
Filtering ends when only one profile left in true_list. Or when all
existence booleans loops passed, in that case first profile from left
profiles is returned.
Args:
matching_profiles (list): Profiles with same values.
Returns:
dict: Most matching profile.
"""
self.log.info(
"Search for first most matching profile in match order:"
" Host name -> Task name -> Family."
)
# Filter all profiles with highest points value. First filter profiles
# with matching host if there are any then filter profiles by task
# name if there are any and lastly filter by family. Else use first in
# list.
idx = 0
final_profile = None
while True:
profiles_true = []
profiles_false = []
for profile in matching_profiles:
value = profile["__value__"]
# Just use first profile when idx is greater than values.
if not idx < len(value):
final_profile = profile
break
if value[idx]:
profiles_true.append(profile)
else:
profiles_false.append(profile)
if final_profile is not None:
break
if profiles_true:
matching_profiles = profiles_true
else:
matching_profiles = profiles_false
if len(matching_profiles) == 1:
final_profile = matching_profiles[0]
break
idx += 1
final_profile.pop("__value__")
return final_profile
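# A worked example of the elimination passes above, with hypothetical
# "__value__" lists ordered as (host, task, family) match flags:
#
# matching_profiles = [
#     {"name": "a", "__value__": [True, False, True]},
#     {"name": "b", "__value__": [True, True, False]},
#     {"name": "c", "__value__": [False, True, True]},
# ]
# idx 0 (host): "a" and "b" stay in true_list, "c" is dropped
# idx 1 (task): only "b" matched -> single profile left, "b" is returned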
def filter_burnins_by_families(self, profile, instance):
"""Filter outputs that are not supported for instance families.
Burnin definitions without a families filter are marked as valid.
Args:
profile (dict): Profile from presets matching current context.
instance (Instance): Publish instance to get families from.
Returns:
dict: Containing all burnin definitions matching instance families.
"""
filtered_burnin_defs = {}
burnin_defs = profile.get("burnins")
if not burnin_defs:
return filtered_burnin_defs
# Prepare families
families = self.families_from_instance(instance)
families = [family.lower() for family in families]
for filename_suffix, burnin_def in burnin_defs.items():
burnin_filter = burnin_def.get("filter")
# When filters not set then skip filtering process
if burnin_filter:
families_filters = burnin_filter.get("families")
if not self.families_filter_validation(
families, families_filters
):
continue
filtered_burnin_defs[filename_suffix] = burnin_def
return filtered_burnin_defs
def families_filter_validation(self, families, output_families_filter):
"""Determines if entered families intersect with families filters.
All family values are lowered to avoid unexpected results.
"""
if not output_families_filter:
return True
for family_filter in output_families_filter:
if not family_filter:
continue
if not isinstance(family_filter, (list, tuple)):
if family_filter.lower() not in families:
continue
return True
valid = True
for family in family_filter:
if family.lower() not in families:
valid = False
break
if valid:
return True
return False
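# The filter supports both flat values (any one may match) and nested lists
# (every item in the sublist must match); a hypothetical example:
#   families = ["render", "review"]
#   filters = [["render", "ftrack"], "review"]
#   ["render", "ftrack"] does not match ("ftrack" is missing), but the flat
#   value "review" does -> families_filter_validation returns True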
def compile_list_of_regexes(self, in_list):
"""Convert strings in entered list to compiled regex objects."""
regexes = []
if not in_list:
return regexes
for item in in_list:
if not item:
continue
try:
regexes.append(re.compile(item))
except TypeError:
self.log.warning((
"Invalid type \"{}\" value \"{}\"."
" Expected string based object. Skipping."
).format(str(type(item)), str(item)))
return regexes
def validate_value_by_regexes(self, value, in_list):
"""Validates in any regexe from list match entered value.
Args:
in_list (list): List with regexes.
value (str): String where regexes is checked.
Returns:
int: Returns `0` when list is not set or is empty. Returns `1` when
any regex match value and returns `-1` when none of regexes
match value entered.
"""
if not in_list:
return 0
output = -1
regexes = self.compile_list_of_regexes(in_list)
for regex in regexes:
if re.match(regex, value):
output = 1
break
return output
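# The three return values in a nutshell (hypothetical calls):
#   validate_value_by_regexes("maya", ["may.*"])  -> 1   (a regex matched)
#   validate_value_by_regexes("nuke", ["may.*"])  -> -1  (no regex matched)
#   validate_value_by_regexes("nuke", [])         -> 0   (no filter set)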
def main_family_from_instance(self, instance):
"""Returns main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
def families_from_instance(self, instance):
"""Returns all families of entered instance."""
families = []
family = instance.data.get("family")
if family:
families.append(family)
for family in (instance.data.get("families") or tuple()):
if family not in families:
families.append(family)
return families
def burnin_script_path(self):
"""Returns path to python script for burnin processing."""
# TODO maybe convert to Plugin's attribute
# Get script path.
module_path = os.environ["PYPE_MODULE_ROOT"]
# There can be multiple paths in PYPE_MODULE_ROOT, in which case
# we just take first one.
if os.pathsep in module_path:
module_path = module_path.split(os.pathsep)[0]
scriptpath = os.path.normpath(
os.path.join(
module_path,
"pype",
"scripts",
"otio_burnin.py"
)
)
self.log.debug("scriptpath: {}".format(scriptpath))
return scriptpath
def python_executable_path(self):
"""Returns path to Python 3 executable."""
# TODO maybe convert to Plugin's attribute
# Get executable.
executable = os.getenv("PYPE_PYTHON_EXE")
# There can be multiple paths in PYPE_PYTHON_EXE, in which case
# we just take first one.
if os.pathsep in executable:
executable = executable.split(os.pathsep)[0]
self.log.debug("executable: {}".format(executable))
return executable
def legacy_process(self, instance):
self.log.warning("Legacy burnin presets are used.")
context_data = instance.context.data
version = instance.data.get(
@ -193,7 +955,6 @@ class ExtractBurnin(pype.api.Extractor):
self.log.debug("Output: {}".format(output))
repre_update = {
"anatomy_template": "render",
"files": movieFileBurnin,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]

File diff suppressed because it is too large


@ -26,47 +26,60 @@ class ExtractReviewSlate(pype.api.Extractor):
slate_path = inst_data.get("slateFrame")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# values are set in ExtractReview
to_width = inst_data["reviewToWidth"]
to_height = inst_data["reviewToHeight"]
slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
slate_width = slate_stream["width"]
slate_height = slate_stream["height"]
if "reviewToWidth" in inst_data:
use_legacy_code = True
else:
use_legacy_code = False
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = ((float(resolution_width) * pixel_aspect) /
resolution_height)
delivery_ratio = float(to_width) / float(to_height)
self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))
# get scale factor
scale_factor = float(to_height) / (
resolution_height * pixel_aspect)
# shorten two decimals long float number for testing conditions
resolution_ratio_test = float(
"{:0.2f}".format(resolution_ratio))
delivery_ratio_test = float(
"{:0.2f}".format(delivery_ratio))
if resolution_ratio_test < delivery_ratio_test:
scale_factor = float(to_width) / (
resolution_width * pixel_aspect)
self.log.debug("__ scale_factor: `{}`".format(scale_factor))
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
for idx, repre in enumerate(inst_data["representations"]):
self.log.debug("repre ({}): `{}`".format(idx + 1, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
# values are set in ExtractReview
if use_legacy_code:
to_width = inst_data["reviewToWidth"]
to_height = inst_data["reviewToHeight"]
else:
to_width = repre["resolutionWidth"]
to_height = repre["resolutionHeight"]
# defining image ratios
resolution_ratio = (
(float(slate_width) * pixel_aspect) / slate_height
)
delivery_ratio = float(to_width) / float(to_height)
self.log.debug("resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug("delivery_ratio: `{}`".format(delivery_ratio))
# get scale factor
scale_factor_by_height = float(to_height) / slate_height
scale_factor_by_width = float(to_width) / (
slate_width * pixel_aspect
)
# shorten two decimals long float number for testing conditions
resolution_ratio_test = float("{:0.2f}".format(resolution_ratio))
delivery_ratio_test = float("{:0.2f}".format(delivery_ratio))
self.log.debug("__ scale_factor_by_width: `{}`".format(
scale_factor_by_width
))
self.log.debug("__ scale_factor_by_height: `{}`".format(
scale_factor_by_height
))
_remove_at_end = []
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
@ -84,21 +97,27 @@ class ExtractReviewSlate(pype.api.Extractor):
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
if use_legacy_code:
input_args.extend(repre["_profile"].get('input', []))
else:
input_args.extend(repre["outputDef"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
if use_legacy_code:
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
else:
# Codecs are copied from source for whole input
codec_args = self.codec_args(repre)
output_args.extend(codec_args)
# make sure colors are correct
output_args.extend([
@ -109,34 +128,37 @@ class ExtractReviewSlate(pype.api.Extractor):
])
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if (
# Always scale slate if not legacy
not use_legacy_code or
# Legacy code required reformat tag
(use_legacy_code and "reformat" in p_tags)
):
if resolution_ratio_test < delivery_ratio_test:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale) / 2)
width_scale = int(slate_width * scale_factor_by_height)
width_half_pad = int((to_width - width_scale) / 2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / (float(
resolution_width) * pixel_aspect)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale) / 2)
height_scale = int(slate_height * scale_factor_by_width)
height_half_pad = int((to_height - height_scale) / 2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
"__ width_scale: `{}`".format(width_scale)
)
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
"__ width_half_pad: `{}`".format(width_half_pad)
)
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
"__ height_scale: `{}`".format(height_scale)
)
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
"__ height_half_pad: `{}`".format(height_half_pad)
)
scaling_arg = ("scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1").format(
@ -144,10 +166,12 @@ class ExtractReviewSlate(pype.api.Extractor):
width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
vf_back = self.add_video_filter_args(output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# overrides output file
output_args.append("-y")
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
@ -206,10 +230,10 @@ class ExtractReviewSlate(pype.api.Extractor):
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
inst_data["representations"][i].update(repre_update)
inst_data["representations"][idx].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
idx, inst_data["representations"][idx]))
# removing temp files
for f in _remove_at_end:
@ -260,3 +284,39 @@ class ExtractReviewSlate(pype.api.Extractor):
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
def codec_args(self, repre):
"""Detect possible codec arguments from representation."""
codec_args = []
# Get one filename of representation files
filename = repre["files"]
# If files is list then pick first filename in list
if isinstance(filename, (tuple, list)):
filename = filename[0]
# Get full path to the file
full_input_path = os.path.join(repre["stagingDir"], filename)
try:
# Get information about input file via ffprobe tool
streams = pype.lib.ffprobe_streams(full_input_path)
except Exception:
self.log.warning(
"Could not get codec data from input.",
exc_info=True
)
return codec_args
codec_name = streams[0].get("codec_name")
if codec_name:
codec_args.append("-codec:v {}".format(codec_name))
profile_name = streams[0].get("profile")
if profile_name:
profile_name = profile_name.replace(" ", "_").lower()
codec_args.append("-profile:v {}".format(profile_name))
pix_fmt = streams[0].get("pix_fmt")
if pix_fmt:
codec_args.append("-pix_fmt {}".format(pix_fmt))
return codec_args
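# Assuming ffprobe reports an H.264 stream (stream values below are
# hypothetical), the detected arguments could look like this:
#   streams[0] == {"codec_name": "h264", "profile": "High 4:2:2",
#                  "pix_fmt": "yuv422p"}
#   codec_args -> ["-codec:v h264", "-profile:v high_4:2:2",
#                  "-pix_fmt yuv422p"]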


@ -356,8 +356,11 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin):
_anatomy_filled = anatomy.format(anatomy_data)
_template_filled = _anatomy_filled["master"]["path"]
head, tail = _template_filled.split(frame_splitter)
padding = (
anatomy.templates["render"]["padding"]
padding = int(
anatomy.templates["render"].get(
"frame_padding",
anatomy.templates["render"].get("padding")
)
)
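# For illustration (hypothetical template values):
# anatomy.templates["render"] == {"frame_padding": 4} -> padding = 4
# when "frame_padding" is missing, the legacy "padding" key is used instead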
dst_col = clique.Collection(
@ -481,9 +484,6 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin):
def copy_file(self, src_path, dst_path):
# TODO check drives if are the same to check if cas hardlink
dst_path = self.path_root_check(dst_path)
src_path = self.path_root_check(src_path)
dirname = os.path.dirname(dst_path)
try:
@ -513,75 +513,6 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin):
shutil.copy(src_path, dst_path)
def path_root_check(self, path):
normalized_path = os.path.normpath(path)
forward_slash_path = normalized_path.replace("\\", "/")
drive, _path = os.path.splitdrive(normalized_path)
if os.path.exists(drive + "/"):
key = "drive_check{}".format(drive)
if key not in self.path_checks:
self.log.debug(
"Drive \"{}\" exist. Nothing to change.".format(drive)
)
self.path_checks.append(key)
return normalized_path
path_env_key = "PYPE_STUDIO_PROJECTS_PATH"
mount_env_key = "PYPE_STUDIO_PROJECTS_MOUNT"
missing_envs = []
if path_env_key not in os.environ:
missing_envs.append(path_env_key)
if mount_env_key not in os.environ:
missing_envs.append(mount_env_key)
if missing_envs:
key = "missing_envs"
if key not in self.path_checks:
self.path_checks.append(key)
_add_s = ""
if len(missing_envs) > 1:
_add_s = "s"
self.log.warning((
"Can't replace MOUNT drive path to UNC path due to missing"
" environment variable{}: `{}`. This may cause issues"
" during publishing process."
).format(_add_s, ", ".join(missing_envs)))
return normalized_path
unc_root = os.environ[path_env_key].replace("\\", "/")
mount_root = os.environ[mount_env_key].replace("\\", "/")
# --- Remove slashes at the end of mount and unc roots ---
while unc_root.endswith("/"):
unc_root = unc_root[:-1]
while mount_root.endswith("/"):
mount_root = mount_root[:-1]
# ---
if forward_slash_path.startswith(unc_root):
self.log.debug((
"Path already starts with UNC root: \"{}\""
).format(unc_root))
return normalized_path
if not forward_slash_path.startswith(mount_root):
self.log.warning((
"Path do not start with MOUNT root \"{}\" "
"set in environment variable \"{}\""
).format(unc_root, mount_env_key))
return normalized_path
# Replace Mount root with Unc root
path = unc_root + forward_slash_path[len(mount_root):]
return os.path.normpath(path)
def version_from_representations(self, repres):
for repre in repres:
version = io.find_one({"_id": repre["parent"]})


@ -5,6 +5,7 @@ import sys
import copy
import clique
import errno
import six
from pymongo import DeleteOne, InsertOne
import pyblish.api
@ -40,10 +41,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
'name': representation name (usually the same as extension)
'ext': file extension
optional data
'anatomy_template': 'publish' or 'render', etc.
template from anatomy that should be used for
integrating this file. Only the first level can
be specified right now.
"frameStart"
"frameEnd"
'fps'
@ -92,6 +89,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"family", "hierarchy", "task", "username"
]
default_template_name = "publish"
template_name_profiles = None
def process(self, instance):
@ -268,6 +266,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if 'transfers' not in instance.data:
instance.data['transfers'] = []
template_name = self.template_name_from_instance(instance)
published_representations = {}
for idx, repre in enumerate(instance.data["representations"]):
published_files = []
@ -292,9 +292,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
template_name = (
repre.get('anatomy_template') or self.default_template_name
)
if repre.get("outputName"):
template_data["output"] = repre['outputName']
@ -304,6 +301,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
sequence_repre = isinstance(files, list)
repre_context = None
if sequence_repre:
self.log.debug(
"files: {}".format(files))
src_collections, remainder = clique.assemble(files)
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
@ -331,6 +330,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
test_dest_files.append(
os.path.normpath(template_filled)
)
template_data["frame"] = repre_context["frame"]
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
@ -343,9 +343,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = (
anatomy.templates["render"]["padding"]
frame_start_padding = int(
anatomy.templates["render"].get(
"frame_padding",
anatomy.templates["render"].get("padding")
)
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
@ -394,7 +398,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_start_frame,
dst_tail
).replace("..", ".")
repre['published_path'] = self.unc_convert(dst)
repre['published_path'] = dst
else:
# Single file
@ -422,7 +426,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data["transfers"].append([src, dst])
published_files.append(dst)
repre['published_path'] = self.unc_convert(dst)
repre['published_path'] = dst
self.log.debug("__ dst: {}".format(dst))
repre["publishedFiles"] = published_files
@ -526,23 +530,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def unc_convert(self, path):
self.log.debug("> __ path: `{}`".format(path))
drive, _path = os.path.splitdrive(path)
self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path))
if not os.path.exists(drive + "/"):
self.log.info("Converting to unc from environments ..")
path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH")
path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
if "/" in path_mount:
path = path.replace(path_mount[0:-1], path_replace)
else:
path = path.replace(path_mount, path_replace)
return path
def copy_file(self, src, dst):
""" Copy given source to destination
@ -552,8 +539,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Returns:
None
"""
src = self.unc_convert(src)
dst = self.unc_convert(dst)
src = os.path.normpath(src)
dst = os.path.normpath(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
@ -569,16 +554,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# copy file with speedcopy and check if the file sizes match
while True:
copyfile(src, dst)
try:
copyfile(src, dst)
except OSError as e:
self.log.critical("Cannot copy {} to {}".format(src, dst))
self.log.critical(e)
six.reraise(*sys.exc_info())
if getsize(src) == getsize(dst):  # compare file sizes exactly
break
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
src = self.unc_convert(src)
dst = self.unc_convert(dst)
try:
os.makedirs(dirname)
except OSError as e:
@ -663,30 +650,35 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
if "source" in instance.data:
source = instance.data["source"]
else:
source = context.data["currentFile"]
self.log.debug("source: {}".format(source))
source = str(source).replace(
os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
api.registered_root()
anatomy = instance.context.data["anatomy"]
success, rootless_path = (
anatomy.find_root_template_from_path(source)
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
if success:
source = rootless_path
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(source))
self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get(
"fps", instance.data.get("fps"))}
version_data = {
"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get(
"fps", instance.data.get("fps")
)
}
intent_value = instance.context.data.get("intent")
if intent_value and isinstance(intent_value, dict):
@ -705,3 +697,70 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
version_data[key] = instance.data[key]
return version_data
def main_family_from_instance(self, instance):
"""Returns main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
def template_name_from_instance(self, instance):
template_name = self.default_template_name
if not self.template_name_profiles:
self.log.debug((
"Template name profiles are not set."
" Using default \"{}\""
).format(template_name))
return template_name
# Task name from session?
task_name = io.Session.get("AVALON_TASK")
family = self.main_family_from_instance(instance)
matching_profiles = None
highest_value = -1
self.log.info(self.template_name_profiles)
for name, filters in self.template_name_profiles.items():
value = 0
families = filters.get("families")
if families:
if family not in families:
continue
value += 1
tasks = filters.get("tasks")
if tasks:
if task_name not in tasks:
continue
value += 1
if value > highest_value:
matching_profiles = {}
highest_value = value
if value == highest_value:
matching_profiles[name] = filters
if len(matching_profiles) == 1:
template_name = tuple(matching_profiles.keys())[0]
self.log.debug(
"Using template name \"{}\".".format(template_name)
)
elif len(matching_profiles) > 1:
template_name = tuple(matching_profiles.keys())[0]
self.log.warning((
"More than one template profiles matched"
" Family \"{}\" and Task: \"{}\"."
" Using first template name in row \"{}\"."
).format(family, task_name, template_name))
else:
self.log.debug((
"None of template profiles matched"
" Family \"{}\" and Task: \"{}\"."
" Using default template name \"{}\""
).format(family, task_name, template_name))
return template_name
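# A hypothetical `template_name_profiles` preset and how it resolves for
# family "render" and task "compositing":
#
# template_name_profiles = {
#     "publish": {},  # no filters -> 0 points
#     "render": {"families": ["render"]},  # 1 point
#     "render_comp": {"families": ["render"], "tasks": ["compositing"]},  # 2
# }
# "render_comp" scores highest and is used as the template name.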


@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
"""Submit publishing job to farm."""
import os
import json
import re
@ -10,7 +13,7 @@ import pyblish.api
def _get_script():
"""Get path to the image sequence script"""
"""Get path to the image sequence script."""
try:
from pype.scripts import publish_filesequence
except Exception:
@ -20,17 +23,11 @@ def _get_script():
if module_path.endswith(".pyc"):
module_path = module_path[: -len(".pyc")] + ".py"
module_path = os.path.normpath(module_path)
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"])
network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"])
module_path = module_path.replace(mount_root, network_root)
return module_path
return os.path.normpath(module_path)
# Logic to retrieve latest files concerning extendFrames
def get_latest_version(asset_name, subset_name, family):
"""Retrieve latest files concerning extendFrame feature."""
# Get asset
asset_name = io.find_one(
{"type": "asset", "name": asset_name}, projection={"name": True}
@ -64,9 +61,7 @@ def get_latest_version(asset_name, subset_name, family):
def get_resources(version, extension=None):
"""
Get the files from the specific version
"""
"""Get the files from the specific version."""
query = {"type": "representation", "parent": version["_id"]}
if extension:
query["name"] = extension
@ -86,14 +81,25 @@ def get_resources(version, extension=None):
return resources
def get_resource_files(resources, frame_range, override=True):
def get_resource_files(resources, frame_range=None):
"""Get resource files at given path.
If `frame_range` is specified, frames outside of it will be removed.
Arguments:
resources (list): List of resources.
frame_range (list): Frame range to filter by.
Returns:
list of str: List of collected resources.
"""
res_collections, _ = clique.assemble(resources)
assert len(res_collections) == 1, "Multiple collections found"
res_collection = res_collections[0]
# Remove any frames
if override:
if frame_range is not None:
for frame in frame_range:
if frame not in res_collection.indexes:
continue
@ -149,10 +155,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_LOG_NO_COLORS",
"PYPE_METADATA_FILE",
"AVALON_PROJECT",
"PYPE_PYTHON_EXE"
"PYPE_LOG_NO_COLORS"
]
# custom deadline attributes
@ -178,10 +183,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
families_transfer = ["render3d", "render2d", "ftrack", "slate"]
def _submit_deadline_post_job(self, instance, job):
"""
"""Submit publish job to Deadline.
Deadline-specific code separated from :meth:`process` for the sake of
more universal code. The Muster post job is sent directly by the Muster
submitter, so this kind of code isn't necessary there.
"""
data = instance.data.copy()
subset = data["subset"]
@ -189,14 +196,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
batch=job["Props"]["Name"], subset=subset
)
metadata_filename = "{}_metadata.json".format(subset)
output_dir = instance.data["outputDir"]
metadata_path = os.path.join(output_dir, metadata_filename)
metadata_path = os.path.normpath(metadata_path)
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"]
metadata_path = metadata_path.replace(mount_root, network_root)
# Convert output dir to `{root}/rest/of/path/...` with Anatomy
success, rootless_path = (
self.anatomy.find_root_template_from_path(output_dir)
)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(output_dir))
rootless_path = output_dir
# Generate the payload for Deadline submission
payload = {
@ -230,6 +241,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_filename = "{}_metadata.json".format(subset)
metadata_path = os.path.join(rootless_path, metadata_filename)
environment = job["Props"].get("Env", {})
environment["PYPE_PYTHON_EXE"] = "//pype/Core/software/python36/python.exe"
environment["PYPE_LOG_NO_COLORS"] = "1"
@ -261,19 +275,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# self.log.info(json.dumps(payload, indent=4, sort_keys=True))
url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
response = requests.post(url, json=payload)
response = requests.post(url, json=payload, timeout=10)
if not response.ok:
raise Exception(response.text)
def _copy_extend_frames(self, instance, representation):
"""
"""Copy existing frames from latest version.
This will copy all existing frames from the subset's latest version back
to the render directory and rename them to what the renderer is
expecting.
:param instance: instance to get required data from
:type instance: pyblish.plugin.Instance
"""
Arguments:
instance (pyblish.plugin.Instance): instance to get required
data from
representation (dict): representation to operate on
"""
import speedcopy
self.log.info("Preparing to copy ...")
@ -313,9 +330,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# type
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
staging = representation.get("stagingDir")
staging = self.anatomy.fill_roots(staging)
resource_files.append(
(frame,
os.path.join(representation.get("stagingDir"),
os.path.join(staging,
"{}{}{}".format(pre,
fn.group("frame"),
post)))
@ -335,19 +354,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Finished copying %i files" % len(resource_files))
def _create_instances_for_aov(self, instance_data, exp_files):
"""
"""Create instance for each AOV found.
This will create new instance for every aov it can detect in expected
files list.
:param instance_data: skeleton data for instance (those needed) later
by collector
:type instance_data: pyblish.plugin.Instance
:param exp_files: list of expected files divided by aovs
:type exp_files: list
:returns: list of instances
:rtype: list(publish.plugin.Instance)
"""
Arguments:
instance_data (pyblish.plugin.Instance): skeleton data for the
instance (only what is needed later by the collector)
exp_files (list): list of expected files divided by AOVs
Returns:
list of instances
"""
task = os.environ["AVALON_TASK"]
subset = instance_data["subset"]
instances = []
@ -371,6 +391,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
subset_name = '{}_{}'.format(group_name, aov)
staging = os.path.dirname(list(cols[0])[0])
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
self.log.info("Creating data for: {}".format(subset_name))
@ -396,7 +426,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"frameEnd": int(instance_data.get("frameEndHandle")),
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"anatomy_template": "render",
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else []
}
@ -414,26 +443,28 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
return instances
def _get_representations(self, instance, exp_files):
"""
"""Create representations for file sequences.
This will return representations of expected files if they are not in
the hierarchy of AOVs. In most cases there should be only one sequence
of files, but if not, we create a representation from each of them.
:param instance: instance for which we are setting representations
:type instance: pyblish.plugin.Instance
:param exp_files: list of expected files
:type exp_files: list
:returns: list of representations
:rtype: list(dict)
"""
Arguments:
instance (pyblish.plugin.Instance): instance for which we are
setting representations
exp_files (list): list of expected files
Returns:
list of representations
"""
representations = []
cols, rem = clique.assemble(exp_files)
collections, remainders = clique.assemble(exp_files)
bake_render_path = instance.get("bakeRenderPath")
# create representation for every collected sequence
for c in cols:
ext = c.tail.lstrip(".")
for collection in collections:
ext = collection.tail.lstrip(".")
preview = False
# if filtered aov name is found in filename, toggle it for
# preview video rendering
@ -442,7 +473,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
for aov in self.aov_filter[app]:
if re.match(
r".+(?:\.|_)({})(?:\.|_).*".format(aov),
list(c)[0]
list(collection)[0]
):
preview = True
break
@ -451,15 +482,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if bake_render_path:
preview = False
staging = os.path.dirname(list(collection)[0])
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(c)],
"files": [os.path.basename(f) for f in list(collection)],
"frameStart": int(instance.get("frameStartHandle")),
"frameEnd": int(instance.get("frameEndHandle")),
# If expectedFile are absolute, we need only filenames
"stagingDir": os.path.dirname(list(c)[0]),
"anatomy_template": "render",
"stagingDir": staging,
"fps": instance.get("fps"),
"tags": ["review", "preview"] if preview else [],
}
@ -472,19 +514,30 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
self._solve_families(instance, preview)
# add remainders as representations
for r in rem:
ext = r.split(".")[-1]
for remainder in remainders:
ext = remainder.split(".")[-1]
staging = os.path.dirname(remainder)
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
rep = {
"name": ext,
"ext": ext,
"files": os.path.basename(r),
"stagingDir": os.path.dirname(r),
"anatomy_template": "publish"
"files": os.path.basename(remainder),
"stagingDir": os.path.dirname(remainder),
}
if r in bake_render_path:
if remainder in bake_render_path:
rep.update({
"fps": instance.get("fps"),
"anatomy_template": "render",
"tags": ["review", "delete"]
})
# solve families with `preview` attributes
@ -506,7 +559,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
instance["families"] = families
def process(self, instance):
"""
"""Process plugin.
Detect the type of render farm submission and, in the case of Deadline,
create and post a dependent job. It creates a JSON file with metadata
needed for publishing in the render directory.
@ -517,6 +571,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
data = instance.data.copy()
context = instance.context
self.context = context
self.anatomy = instance.context.data["anatomy"]
if hasattr(instance, "_log"):
data['_log'] = instance._log
@ -576,11 +631,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
except KeyError:
source = context.data["currentFile"]
source = source.replace(
os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root()
success, rootless_path = (
self.anatomy.find_root_template_from_path(source)
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
if success:
source = rootless_path
else:
# `rootless_path` is not set to `source` if none of roots match
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues."
).format(source))
families = ["render"]
@ -631,13 +693,29 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# look into instance data if representations are not having any
# which are having tag `publish_on_farm` and include them
for r in instance.data.get("representations", []):
if "publish_on_farm" in r.get("tags"):
for repre in instance.data.get("representations", []):
staging_dir = repre.get("stagingDir")
if staging_dir:
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(
staging_dir
)
)
if success:
repre["stagingDir"] = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging_dir))
repre["stagingDir"] = staging_dir
if "publish_on_farm" in repre.get("tags"):
# create representations attribute if not there
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
instance_skeleton_data["representations"].append(r)
instance_skeleton_data["representations"].append(repre)
instances = None
assert data.get("expectedFiles"), ("Submission from old Pype version"
@ -779,12 +857,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
with open(metadata_path, "w") as f:
json.dump(publish_job, f, indent=4, sort_keys=True)
def _extend_frames(self, asset, subset, start, end, override):
"""
This will get latest version of asset and update frame range based
on minimum and maximuma values
"""
def _extend_frames(self, asset, subset, start, end):
"""Get latest version of asset nad update frame range.
Based on minimum and maximuma values.
Arguments:
asset (str): asset name
subset (str): subset name
start (int): start frame
end (int): end frame
Returns:
(int, int): updated frame start/end
"""
# Frame comparison
prev_start = None
prev_end = None


@ -122,6 +122,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
workspace = context.data["workspaceDir"]
self._rs = renderSetup.instance()
current_layer = self._rs.getVisibleRenderLayer()
maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
self.maya_layers = maya_render_layers
@ -157,6 +158,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
attachTo = []
if sets:
for s in sets:
if "family" not in cmds.listAttr(s):
continue
attachTo.append(
{
"version": None, # we need integrator for that
@ -303,6 +307,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
instance.data.update(data)
self.log.debug("data: {}".format(json.dumps(data, indent=4)))
# Restore current layer.
self.log.info("Restoring to {}".format(current_layer.name()))
self._rs.switchToLayer(current_layer)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
@ -397,6 +405,8 @@ class ExpectedFiles:
multipart = False
def get(self, renderer, layer):
renderSetup.instance().switchToLayerUsingLegacyName(layer)
if renderer.lower() == "arnold":
return self._get_files(ExpectedFilesArnold(layer))
elif renderer.lower() == "vray":


@ -73,7 +73,6 @@ class ExtractYetiCache(pype.api.Extractor):
'ext': 'fur',
'files': cache_files[0] if len(cache_files) == 1 else cache_files,
'stagingDir': dirname,
'anatomy_template': 'publish',
'frameStart': int(start_frame),
'frameEnd': int(end_frame)
}
@ -84,8 +83,7 @@ class ExtractYetiCache(pype.api.Extractor):
'name': 'fursettings',
'ext': 'fursettings',
'files': os.path.basename(data_file),
'stagingDir': dirname,
'anatomy_template': 'publish'
'stagingDir': dirname
}
)


@ -169,8 +169,7 @@ class ExtractYetiRig(pype.api.Extractor):
'name': "ma",
'ext': 'ma',
'files': "yeti_rig.ma",
'stagingDir': dirname,
'anatomy_template': 'publish'
'stagingDir': dirname
}
)
self.log.info("settings file: {}".format("yeti.rigsettings"))
@ -179,8 +178,7 @@ class ExtractYetiRig(pype.api.Extractor):
'name': 'rigsettings',
'ext': 'rigsettings',
'files': 'yeti.rigsettings',
'stagingDir': dirname,
'anatomy_template': 'publish'
'stagingDir': dirname
}
)


@ -1,6 +1,17 @@
# -*- coding: utf-8 -*-
"""Submitting render job to Deadline.
This module takes care of submitting a job from Maya to Deadline. It
creates the job and sets the correct environment. Its behavior is
controlled by the `DEADLINE_REST_URL` environment variable - pointing to
the Deadline Web Service - and the `MayaSubmitDeadline.use_published`
(bool) property telling Deadline whether to use the published scene
workfile or not.
"""
import os
import json
import getpass
import re
import clique
from maya import cmds
@ -14,7 +25,7 @@ import pype.maya.lib as lib
def get_renderer_variables(renderlayer=None):
"""Retrieve the extension which has been set in the VRay settings
"""Retrieve the extension which has been set in the VRay settings.
Will return None if the current renderer is not VRay.
For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which
@ -25,16 +36,21 @@ def get_renderer_variables(renderlayer=None):
Returns:
dict
"""
"""
renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer())
render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"])
padding = cmds.getAttr("{}.{}".format(render_attrs["node"],
render_attrs["padding"]))
filename_0 = cmds.renderSettings(fullPath=True, firstImageName=True)[0]
filename_0 = cmds.renderSettings(
fullPath=True,
gin="#" * int(padding),
lut=True,
layer=renderlayer or lib.get_current_renderlayer())[0]
filename_0 = filename_0.replace('_<RenderPass>', '_beauty')
prefix_attr = "defaultRenderGlobals.imageFilePrefix"
if renderer == "vray":
# Maya's renderSettings function does not return V-Ray file extension
# so we get the extension from vraySettings
@ -46,62 +62,33 @@ def get_renderer_variables(renderlayer=None):
if extension is None:
extension = "png"
filename_prefix = "<Scene>/<Scene>_<Layer>/<Layer>"
if extension == "exr (multichannel)" or extension == "exr (deep)":
extension = "exr"
prefix_attr = "vraySettings.fileNamePrefix"
elif renderer == "renderman":
prefix_attr = "rmanGlobals.imageFileFormat"
elif renderer == "redshift":
# mapping redshift extension dropdown values to strings
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
extension = ext_mapping[
cmds.getAttr("redshiftOptions.imageFormat")
]
else:
# Get the extension, getAttr defaultRenderGlobals.imageFormat
# returns an index number.
filename_base = os.path.basename(filename_0)
extension = os.path.splitext(filename_base)[-1].strip(".")
filename_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
filename_prefix = cmds.getAttr(prefix_attr)
return {"ext": extension,
"filename_prefix": filename_prefix,
"padding": padding,
"filename_0": filename_0}
def preview_fname(folder, scene, layer, padding, ext):
"""Return output file path with #### for padding.
Deadline requires the path to be formatted with # in place of numbers.
For example `/path/to/render.####.png`
Args:
folder (str): The root output folder (image path)
scene (str): The scene name
layer (str): The layer name to be rendered
padding (int): The padding length
ext(str): The output file extension
Returns:
str
"""
fileprefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
output = fileprefix + ".{number}.{ext}"
# RenderPass is currently hardcoded to "beauty" because its not important
# for the deadline submission, but we will need something to replace
# "<RenderPass>".
mapping = {
"<Scene>": "{scene}",
"<RenderLayer>": "{layer}",
"RenderPass": "beauty"
}
for key, value in mapping.items():
output = output.replace(key, value)
output = output.format(
scene=scene,
layer=layer,
number="#" * padding,
ext=ext
)
return os.path.join(folder, output)
class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit available render layers to Deadline
"""Submit available render layers to Deadline.
Renders are submitted to a Deadline Web Service as
supplied via the environment variable DEADLINE_REST_URL
@ -194,22 +181,22 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
scene = os.path.splitext(filename)[0]
dirname = os.path.join(workspace, "renders")
renderlayer = instance.data['setMembers'] # rs_beauty
renderlayer_name = instance.data['subset'] # beauty
# renderlayer_globals = instance.data["renderGlobals"]
# legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
deadline_user = context.data.get("deadlineUser", getpass.getuser())
jobname = "%s - %s" % (filename, instance.name)
# Get the variables depending on the renderer
render_variables = get_renderer_variables(renderlayer)
output_filename_0 = preview_fname(folder=dirname,
scene=scene,
layer=renderlayer_name,
padding=render_variables["padding"],
ext=render_variables["ext"])
filename_0 = render_variables["filename_0"]
if self.use_published:
new_scene = os.path.splitext(filename)[0]
orig_scene = os.path.splitext(
os.path.basename(context.data["currentFile"]))[0]
filename_0 = render_variables["filename_0"].replace(
orig_scene, new_scene)
output_filename_0 = filename_0
try:
# Ensure render folder exists
@ -226,6 +213,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
# Top-level group name
"BatchName": filename,
# Asset dependency to wait for at least the scene file to sync.
"AssetDependency0": filepath,
# Job name, as seen in Monitor
"Name": jobname,
@ -284,7 +274,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
for aov, files in exp[0].items():
col = clique.assemble(files)[0][0]
outputFile = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile # noqa: E501
OutputFilenames[expIndex] = outputFile
expIndex += 1
else:
@ -293,7 +283,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile
# OutputFilenames[expIndex] = outputFile
# We need those to pass them to pype for it to set correct context
keys = [
"FTRACK_API_KEY",
@ -302,7 +291,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"PYPE_USERNAME"
"PYPE_USERNAME",
"PYPE_DEV"
]
environment = dict({key: os.environ[key] for key in keys
@ -334,7 +324,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
raise Exception(response.text)
# Store output dir for unified publisher (filesequence)
instance.data["outputDir"] = os.path.dirname(output_filename_0)
instance.data["outputDir"] = os.path.dirname(filename_0)
instance.data["deadlineSubmissionJob"] = response.json()
def preflight_check(self, instance):
@ -363,6 +353,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
@ -377,4 +369,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
# add 10sec timeout before bailing out
kwargs['timeout'] = 10
return requests.get(*args, **kwargs)


@ -309,14 +309,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
output_dir = instance.data["outputDir"]
metadata_path = os.path.join(output_dir, metadata_filename)
# replace path for UNC / network share paths, co PYPE is found
# over network. It assumes PYPE is located somewhere in
# PYPE_STUDIO_CORE_PATH
pype_root = os.environ["PYPE_ROOT"].replace(
os.path.normpath(
os.environ['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
os.environ['PYPE_STUDIO_CORE_PATH'])) # noqa
pype_root = os.environ["PYPE_SETUP_PATH"]
# we must provide either full path to executable or use musters own
# python named MPython.exe, residing directly in muster bin
@ -517,33 +510,25 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
environment["PATH"] = os.environ["PATH"]
# self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS']))
clean_environment = {}
for key in environment:
for key, value in environment.items():
clean_path = ""
self.log.debug("key: {}".format(key))
to_process = environment[key]
if key == "PYPE_STUDIO_CORE_MOUNT":
clean_path = environment[key]
elif "://" in environment[key]:
clean_path = environment[key]
elif os.pathsep not in to_process:
try:
path = environment[key]
path.decode('UTF-8', 'strict')
clean_path = os.path.normpath(path)
except UnicodeDecodeError:
print('path contains non UTF characters')
if "://" in value:
clean_path = value
else:
for path in environment[key].split(os.pathsep):
valid_paths = []
for path in value.split(os.pathsep):
if not path:
continue
try:
path.decode('UTF-8', 'strict')
clean_path += os.path.normpath(path) + os.pathsep
valid_paths.append(os.path.normpath(path))
except UnicodeDecodeError:
print('path contains non UTF characters')
# this should replace paths so they are pointing to network share
clean_path = clean_path.replace(
os.path.normpath(environment['PYPE_STUDIO_CORE_MOUNT']),
os.path.normpath(environment['PYPE_STUDIO_CORE_PATH']))
if valid_paths:
clean_path = os.pathsep.join(valid_paths)
clean_environment[key] = clean_path
return clean_environment


@ -37,50 +37,71 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT")
)
assert self.maya_is_true(relative_texture) is not True, \
("Texture path is set to be absolute")
assert self.maya_is_true(relative_procedural) is not True, \
("Procedural path is set to be absolute")
texture_search_path = texture_search_path.replace("\\", "/")
procedural_search_path = procedural_search_path.replace("\\", "/")
project_root = project_root.replace("\\", "/")
anatomy = instance.context.data["anatomy"]
assert project_root in texture_search_path, \
("Project root is not in texture_search_path")
assert project_root in procedural_search_path, \
("Project root is not in procedural_search_path")
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
self.log.info("discovered roots: {}".format(":".join(paths)))
assert ":".join(paths) in texture_search_path, (
"Project roots are not in texture_search_path"
)
assert ":".join(paths) in procedural_search_path, (
"Project roots are not in procedural_search_path"
)
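To illustrate, with two hypothetical root names the expected prefix is built like this:

# Hypothetical root names; the real ones come from
# anatomy.root_environments().keys().
keys = ["work", "publish"]
paths = ["[{}]".format(k) for k in keys]
print(":".join(paths))  # "[work]:[publish]"
# The validator then expects this token string to appear in both
# tspath and pspath.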
@classmethod
def repair(cls, instance):
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")
# Use project root variables for multiplatform support, see:
# https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path
# ':' as path separator is supported by Arnold for all platforms.
anatomy = instance.context.data["anatomy"]
keys = anatomy.root_environments().keys()
paths = []
for k in keys:
paths.append("[{}]".format(k))
cmds.setAttr(
"defaultArnoldRenderOptions.tspath",
":".join([p for p in paths + [texture_path] if p]),
type="string"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_texture_paths",
False
)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT"),
).replace("\\", "/")
cmds.setAttr(
"defaultArnoldRenderOptions.pspath",
":".join([p for p in paths + [procedural_path] if p]),
type="string"
)
cmds.setAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths",
False
)
cmds.setAttr("defaultArnoldRenderOptions.tspath",
project_root + os.pathsep + texture_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.pspath",
project_root + os.pathsep + procedural_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths",
False)
cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths",
False)
@staticmethod
def find_absolute_path(relative_path, all_root_paths):
for root_path in all_root_paths:
possible_path = os.path.join(root_path, relative_path)
if os.path.exists(possible_path):
return possible_path
def maya_is_true(self, attr_val):
"""

View file

@ -237,7 +237,7 @@ class LoadSequence(api.Loader):
repr_cont = representation["context"]
file = self.fname
file = api.get_representation_path(representation)
if not file:
repr_id = representation["_id"]

View file

@ -79,8 +79,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
representation = {
'name': ext,
'ext': ext,
"stagingDir": output_dir,
"anatomy_template": "render"
"stagingDir": output_dir
}
try:
@ -116,7 +115,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"colorspace": node["colorspace"].value(),
"colorspace": node["colorspace"].value(),
}
instance.data["family"] = "write"
@ -151,6 +150,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"deadlinePriority": deadlinePriority
})
if "render" in families:
instance.data["family"] = "render2d"
if "render" not in families:
instance.data["families"].insert(0, "render")
if "prerender" in families:
instance.data.update({
"family": "prerender",

View file

@ -71,8 +71,7 @@ class NukeRenderLocal(pype.api.Extractor):
'ext': ext,
'frameStart': "%0{}d".format(len(str(last_frame))) % first_frame,
'files': collected_frames,
"stagingDir": out_dir,
"anatomy_template": "render"
"stagingDir": out_dir
}
instance.data["representations"].append(repre)

View file

@ -130,7 +130,6 @@ class ExtractThumbnail(pype.api.Extractor):
"stagingDir": staging_dir,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}
instance.data["representations"].append(repre)

View file

@ -128,6 +128,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# Top-level group name
"BatchName": script_name,
# Asset dependency to wait for at least the scene file to sync.
"AssetDependency0": script_path,
# Job name, as seen in Monitor
"Name": jobname,
@ -180,7 +183,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"BatchName": responce_data["Props"]["Batch"],
"JobDependency0": responce_data["_id"],
"ChunkSize": 99999999
})
})
# Include critical environment variables with submission
keys = [
@ -192,7 +195,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"FTRACK_SERVER",
"PYBLISHPLUGINPATH",
"NUKE_PATH",
"TOOL_ENV"
"TOOL_ENV",
"PYPE_DEV"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
@ -201,40 +205,32 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
if path.lower().startswith('pype_'):
environment[path] = os.environ[path]
environment["PATH"] = os.environ["PATH"]
# environment["PATH"] = os.environ["PATH"]
# self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS']))
clean_environment = {}
for key in environment:
for key, value in environment.items():
clean_path = ""
self.log.debug("key: {}".format(key))
to_process = environment[key]
if key == "PYPE_STUDIO_CORE_MOUNT":
clean_path = environment[key]
elif "://" in environment[key]:
clean_path = environment[key]
elif os.pathsep not in to_process:
try:
path = environment[key]
path.decode('UTF-8', 'strict')
clean_path = os.path.normpath(path)
except UnicodeDecodeError:
print('path contains non UTF characters')
if "://" in value:
clean_path = value
else:
for path in environment[key].split(os.pathsep):
valid_paths = []
for path in value.split(os.pathsep):
if not path:
continue
try:
path.decode('UTF-8', 'strict')
clean_path += os.path.normpath(path) + os.pathsep
valid_paths.append(os.path.normpath(path))
except UnicodeDecodeError:
print('path contains non UTF characters')
if valid_paths:
clean_path = os.pathsep.join(valid_paths)
if key == "PYTHONPATH":
clean_path = clean_path.replace('python2', 'python3')
clean_path = clean_path.replace(
os.path.normpath(
environment['PYPE_STUDIO_CORE_MOUNT']), # noqa
os.path.normpath(
environment['PYPE_STUDIO_CORE_PATH'])) # noqa
self.log.debug("clean path: {}".format(clean_path))
clean_environment[key] = clean_path
environment = clean_environment
@ -256,7 +252,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
self.expected_files(instance, render_path)
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
response = requests.post(self.deadline_url, json=payload)
response = requests.post(self.deadline_url, json=payload, timeout=10)
if not response.ok:
raise Exception(response.text)

View file

@ -0,0 +1,90 @@
import pyblish.api
import opentimelineio.opentime as otio_ot
class CollectClipTimecodes(pyblish.api.InstancePlugin):
"""Collect time with OpenTimelineIO:
source_h(In,Out)[timecode, sec]
timeline(In,Out)[timecode, sec]
"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Timecodes"
hosts = ["nukestudio"]
def process(self, instance):
data = dict()
self.log.debug("__ instance.data: {}".format(instance.data))
# Timeline data.
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data("sourceInH",
instance.data("sourceIn") - handle_start)
source_out_h = instance.data("sourceOutH",
instance.data("sourceOut") + handle_end)
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
# set frame start with tag or take it from timeline
frame_start = instance.data.get("startingFrame")
if not frame_start:
frame_start = timeline_in
source = instance.data.get("source")
otio_data = dict()
self.log.debug("__ source: `{}`".format(source))
rate_fps = instance.context.data["fps"]
otio_in_h_ratio = otio_ot.RationalTime(
value=(source.timecodeStart() + source_in_h),
rate=rate_fps)
otio_out_h_ratio = otio_ot.RationalTime(
value=(source.timecodeStart() + source_out_h),
rate=rate_fps)
otio_timeline_in_ratio = otio_ot.RationalTime(
value=int(
instance.data.get("timelineTimecodeStart", 0)) + timeline_in,
rate=rate_fps)
otio_timeline_out_ratio = otio_ot.RationalTime(
value=int(
instance.data.get("timelineTimecodeStart", 0)) + timeline_out,
rate=rate_fps)
otio_data.update({
"otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio),
"otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio),
"otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio),
"otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio),
"otioTimelineInTimecode": otio_ot.to_timecode(
otio_timeline_in_ratio),
"otioTimelineOutTimecode": otio_ot.to_timecode(
otio_timeline_out_ratio),
"otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio),
"otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio)
})
data.update({
"otioData": otio_data,
"sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio),
"sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio)
})
instance.data.update(data)
self.log.debug("data: {}".format(instance.data))

View file

@ -0,0 +1,21 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder + 0.101
label = "Collect Clip Resoluton"
hosts = ["nukestudio"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
instance.data.update({
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})

View file

@ -47,11 +47,42 @@ class CollectClips(api.ContextPlugin):
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
clip_in = int(item.timelineIn())
clip_out = int(item.timelineOut())
file_head = source.filenameHead()
file_info = next((f for f in source.fileinfos()), None)
source_first_frame = file_info.startFrame()
source_first_frame = int(file_info.startFrame())
is_sequence = False
self.log.debug(
"__ assets_shared: {}".format(
context.data["assetsShared"]))
# Check for clips with the same range;
# this tests whether any vertically neighbouring
# clips have already been processed
clip_matching_with_range = next(
(k for k, v in context.data["assetsShared"].items()
if (v.get("_clipIn", 0) == clip_in)
and (v.get("_clipOut", 0) == clip_out)
), False)
# check if the clip name is the same in the matched
# vertically neighbouring clip;
# if it is, that is correct, so reset the variable to False
# so the wrong-name exception is not raised
if asset in str(clip_matching_with_range):
clip_matching_with_range = False
# raise the wrong-name exception if one was found
assert (not clip_matching_with_range), (
"matching clip: {asset}"
" timeline range ({clip_in}:{clip_out})"
" conflicting with {clip_matching_with_range}"
" >> rename any of clips to be the same as the other <<"
).format(
**locals())
if not source.singleFile():
self.log.info("Single file")
is_sequence = True
@ -89,32 +120,31 @@ class CollectClips(api.ContextPlugin):
)
data.update({
"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"timecodeStart": str(source.timecodeStart()),
"timelineTimecodeStart": str(sequence.timecodeStart()),
"sourcePath": source_path,
"sourceFileHead": file_head,
"isSequence": is_sequence,
"track": track.name(),
"trackIndex": track_index,
"sourceFirst": source_first_frame,
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"mediaDuration": (int(item.sourceOut()) -
int(item.sourceIn())) + 1,
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut()),
"clipDuration": (
int(item.timelineOut()) - int(
item.timelineIn())) + 1,
"asset": asset,
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0)})
"name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
"timecodeStart": str(source.timecodeStart()),
"timelineTimecodeStart": str(sequence.timecodeStart()),
"sourcePath": source_path,
"sourceFileHead": file_head,
"isSequence": is_sequence,
"track": track.name(),
"trackIndex": track_index,
"sourceFirst": source_first_frame,
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"mediaDuration": int(source.duration()),
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": (
int(item.timelineOut()) - int(
item.timelineIn())) + 1,
"asset": asset,
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0)})
instance = context.create_instance(**data)
@ -122,7 +152,10 @@ class CollectClips(api.ContextPlugin):
self.log.info("Created instance.data: {}".format(instance.data))
self.log.debug(">> effects: {}".format(instance.data["effects"]))
context.data["assetsShared"][asset] = dict()
context.data["assetsShared"][asset] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
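The range-matching lookup in isolation — a sketch with made-up data mirroring context.data["assetsShared"]:

assets_shared = {
    "sh010": {"_clipIn": 100, "_clipOut": 200},
    "sh020": {"_clipIn": 300, "_clipOut": 400},
}
clip_in, clip_out = 100, 200

# First asset whose stored range matches the current clip, else False.
match = next(
    (k for k, v in assets_shared.items()
     if v.get("_clipIn", 0) == clip_in
     and v.get("_clipOut", 0) == clip_out),
    False)
print(match)  # "sh010"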
# from now we are collecting only subtrackitems on
# track with no video items

View file

@ -35,14 +35,15 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin):
frame_end = frame_start + (timeline_out - timeline_in)
data.update(
{
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h
data.update({
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h,
"clipDurationH": instance.data.get(
"clipDuration") + handle_start + handle_end
}
)
self.log.debug("__ data: {}".format(data))

View file

@ -1,5 +1,6 @@
from pyblish import api
class CollectFramerate(api.ContextPlugin):
"""Collect framerate from selected sequence."""
@ -9,4 +10,13 @@ class CollectFramerate(api.ContextPlugin):
def process(self, context):
sequence = context.data["activeSequence"]
context.data["fps"] = sequence.framerate().toFloat()
context.data["fps"] = self.get_rate(sequence)
def get_rate(self, sequence):
num, den = sequence.framerate().toRational()
rate = float(num) / float(den)
if rate.is_integer():
return rate
return round(rate, 3)
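For example (illustrative values), whole rates stay exact while NTSC-style rates are rounded to three decimals:

def get_rate_from_rational(num, den):
    # Whole frame rates stay exact; fractional ones round to 3 places.
    rate = float(num) / float(den)
    if rate.is_integer():
        return rate
    return round(rate, 3)

print(get_rate_from_rational(24, 1))        # 24.0
print(get_rate_from_rational(30000, 1001))  # 29.97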

View file

@ -37,11 +37,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
assets_shared = context.data.get("assetsShared")
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
asset = instance.data["asset"]
sequence = context.data['activeSequence']
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
resolution_width = instance.data["resolutionWidth"]
resolution_height = instance.data["resolutionHeight"]
pixel_aspect = instance.data["pixelAspect"]
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
fps = context.data["fps"]
# build data for inner nukestudio project property
@ -72,6 +74,31 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
# and finding only hierarchical tag
if "hierarchy" in t_type.lower():
# Check for clips with the same range;
# this tests whether any vertically neighbouring
# clips have already been processed
match = next((
k for k, v in assets_shared.items()
if (v["_clipIn"] == clip_in)
and (v["_clipOut"] == clip_out)
), False)
self.log.debug(
"__ assets_shared[match]: {}".format(
assets_shared.get(match)))
# check if the hierarchy key is present in the matched
# vertically neighbouring clip
if match and not assets_shared[match].get("hierarchy"):
match = False
# raise an exception if multiple hierarchy tags are found
assert not match, (
"Two clips above each other with"
" hierarchy tag are not allowed"
" >> keep hierarchy tag only in one of them <<"
)
d_metadata = dict()
parents = list()
@ -82,7 +109,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
if "shot" in template.lower():
instance.data["asset"] = [
t for t in template.split('/')][-1]
template = "/".join([t for t in template.split('/')][0:-1])
template = "/".join(
[t for t in template.split('/')][0:-1])
# take template from Tag.note and break it into parts
template_split = template.split("/")
@ -149,8 +177,12 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
self.log.info(
"clip: {asset}[{clip_in}:{clip_out}]".format(
**locals()))
# adding to asset shared dict
self.log.debug("__ assets_shared: {}".format(assets_shared))
self.log.debug(
"__ assets_shared: {}".format(assets_shared))
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
asset))
@ -162,11 +194,11 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"resolutionWidth": width,
"resolutionHeight": height,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
"fps": fps,
"tasks": instance.data["tasks"]
"tasks": instance.data["tasks"]
})
# adding frame start if any on instance
@ -175,8 +207,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
asset_shared.update({
"startingFrame": start_frame
})
self.log.debug(
"assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting Hierarchy from instances and building

View file

@ -64,15 +64,15 @@ class CollectPlates(api.InstancePlugin):
# adding SourceResolution if Tag was present
if instance.data.get("sourceResolution") and instance.data.get("main"):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = int(item.source().mediaSource().pixelAspect())
self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
width, height, pixel_aspect))
resolution_width, resolution_height, pixel_aspect))
data.update({
"width": width,
"height": height,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})
@ -102,9 +102,6 @@ class CollectPlatesData(api.InstancePlugin):
instance.data["representations"] = list()
version_data = dict()
context = instance.context
anatomy = context.data.get("anatomy", None)
padding = int(anatomy.templates['render']['padding'])
name = instance.data["subset"]
source_path = instance.data["sourcePath"]
@ -149,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
source_first_frame = instance.data.get("sourceFirst")
source_file_head = instance.data.get("sourceFileHead")
self.log.debug("source_first_frame: `{}`".format(source_first_frame))
if instance.data.get("isSequence", False):
self.log.info("Is sequence of files")
@ -185,8 +183,7 @@ class CollectPlatesData(api.InstancePlugin):
"frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
'step': 1,
'fps': instance.context.data["fps"],
'preview': True,
'thumbnail': False,
'tags': ["preview"],
'name': "preview",
'ext': "mov",
}

View file

@ -1,15 +0,0 @@
import pyblish.api
import avalon.api as avalon
import os
class CollectActiveProjectRoot(pyblish.api.ContextPlugin):
"""Inject the active project into context"""
label = "Collect Project Root"
order = pyblish.api.CollectorOrder - 0.1
def process(self, context):
S = avalon.Session
context.data["projectroot"] = os.path.normpath(
os.path.join(S['AVALON_PROJECTS'], S['AVALON_PROJECT'])
)

View file

@ -36,9 +36,10 @@ class CollectReviews(api.InstancePlugin):
return
if not track:
self.log.debug(
"Skipping \"{}\" because tag is not having `track` in metadata".format(instance)
)
self.log.debug((
"Skipping \"{}\" because tag does not have"
" `track` in metadata"
).format(instance))
return
# add to representations
@ -68,18 +69,17 @@ class CollectReviews(api.InstancePlugin):
rev_inst.data["name"]))
if rev_inst is None:
raise RuntimeError(
"TrackItem from track name `{}` has to be also selected".format(
track)
)
raise RuntimeError((
"TrackItem from track name `{}` has to"
" also be selected"
).format(track))
instance.data["families"].append("review")
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
ext = os.path.splitext(file)[-1][1:]
handleStart = rev_inst.data.get("handleStart")
handleEnd = rev_inst.data.get("handleEnd")
# change label
instance.data["label"] = "{0} - {1} - ({2}) - review".format(
@ -94,15 +94,35 @@ class CollectReviews(api.InstancePlugin):
"stagingDir": file_dir,
"frameStart": rev_inst.data.get("sourceIn"),
"frameEnd": rev_inst.data.get("sourceOut"),
"frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart,
"frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd,
"frameStartFtrack": rev_inst.data.get("sourceInH"),
"frameEndFtrack": rev_inst.data.get("sourceOutH"),
"step": 1,
"fps": rev_inst.data.get("fps"),
"preview": True,
"thumbnail": False,
"name": "preview",
"tags": ["preview"],
"ext": ext
}
media_duration = instance.data.get("mediaDuration")
clip_duration_h = instance.data.get("clipDurationH")
if media_duration > clip_duration_h:
self.log.debug("Media duration higher: {}".format(
(media_duration - clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-bigger", "delete"]
})
elif media_duration < clip_duration_h:
self.log.debug("Media duration higher: {}".format(
(media_duration - clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-smaller", "delete"]
})
instance.data["representations"].append(representation)
self.log.debug("Added representation: {}".format(representation))
@ -122,15 +142,18 @@ class CollectReviews(api.InstancePlugin):
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
thumb_frame = instance.data["sourceIn"] + ((instance.data["sourceOut"] - instance.data["sourceIn"])/2)
thumb_frame = instance.data["sourceIn"] + (
(instance.data["sourceOut"] - instance.data["sourceIn"]) / 2)
self.log.debug("__ thumb_frame: {}".format(thumb_frame))
thumbnail = item.thumbnail(thumb_frame).save(
thumb_path,
format='png'
)
self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
self.log.debug(
"__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug(
"__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
self.log.debug("__ thumbnail: {}".format(thumbnail))
@ -159,7 +182,7 @@ class CollectReviews(api.InstancePlugin):
version_data.update({k: instance.data[k] for k in transfer_data})
if 'version' in instance.data:
version_data["version"] = instance.data[version]
version_data["version"] = instance.data["version"]
# add to data of representation
version_data.update({

View file

@ -0,0 +1,245 @@
import os
from pyblish import api
import pype
class ExtractReviewCutUpVideo(pype.api.Extractor):
"""Cut up clips from long video file"""
order = api.ExtractorOrder
# order = api.CollectorOrder + 0.1023
label = "Extract Review CutUp Video"
hosts = ["nukestudio"]
families = ["review"]
# presets
tags_addition = []
def process(self, instance):
inst_data = instance.data
asset = inst_data['asset']
# get representation and loop them
representations = inst_data["representations"]
# get resolution default
resolution_width = inst_data["resolutionWidth"]
resolution_height = inst_data["resolutionHeight"]
# frame range data
media_duration = inst_data["mediaDuration"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
input_args = list()
output_args = list()
tags = repre.get("tags", [])
# check if supported tags are in representation for activation
filter_tag = False
for tag in ["_cut-bigger", "_cut-smaller"]:
if tag in tags:
filter_tag = True
break
if not filter_tag:
continue
self.log.debug("__ repre: {}".format(repre))
file = repre.get("files")
staging_dir = repre.get("stagingDir")
frame_start = repre.get("frameStart")
frame_end = repre.get("frameEnd")
fps = repre.get("fps")
ext = repre.get("ext")
new_file_name = "{}_{}".format(asset, file)
full_input_path = os.path.join(
staging_dir, file)
full_output_dir = os.path.join(
staging_dir, "cuts")
os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
full_output_path = os.path.join(
full_output_dir, new_file_name)
self.log.debug("__ full_input_path: {}".format(full_input_path))
self.log.debug("__ full_output_path: {}".format(full_output_path))
# check if audio stream is in input video file
ffprob_cmd = (
"{ffprobe_path} -i {full_input_path} -show_streams "
"-select_streams a -loglevel error"
).format(**locals())
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
audio_check_output = pype.api.subprocess(ffprob_cmd)
self.log.debug("audio_check_output: {}".format(audio_check_output))
# translate frame to sec
start_sec = float(frame_start) / fps
duration_sec = float(frame_end - frame_start + 1) / fps
empty_add = None
# check if frames are missing at the start or end
if (start_sec < 0) or (media_duration < frame_end):
# for later switching off the `-c:v copy` output arg
empty_add = True
# init empty variables
video_empty_start = video_layer_start = ""
audio_empty_start = audio_layer_start = ""
video_empty_end = video_layer_end = ""
audio_empty_end = audio_layer_end = ""
audio_input = audio_output = ""
v_inp_idx = 0
concat_n = 1
# try to get video native resolution data
try:
resolution_output = pype.api.subprocess((
"{ffprobe_path} -i {full_input_path} -v error "
"-select_streams v:0 -show_entries "
"stream=width,height -of csv=s=x:p=0"
).format(**locals()))
x, y = resolution_output.split("x")
resolution_width = int(x)
resolution_height = int(y)
except Exception as E:
self.log.warning(
"Video native resolution is untracable: {}".format(E))
if audio_check_output:
# adding input for empty audio
input_args.append("-f lavfi -i anullsrc")
# define audio empty concat variables
audio_input = "[1:a]"
audio_output = ":a=1"
v_inp_idx = 1
# adding input for video black frame
input_args.append((
"-f lavfi -i \"color=c=black:"
"s={resolution_width}x{resolution_height}:r={fps}\""
).format(**locals()))
if (start_sec < 0):
# recalculate input video timing
empty_start_dur = abs(start_sec)
start_sec = 0
duration_sec = float(frame_end - (
frame_start + (empty_start_dur * fps)) + 1) / fps
# define starting empty video concat variables
video_empty_start = (
"[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"
).format(**locals())
video_layer_start = "[gv0]"
if audio_check_output:
# define starting empty audio concat variables
audio_empty_start = (
"[0]atrim=duration={empty_start_dur}[ga0];"
).format(**locals())
audio_layer_start = "[ga0]"
# alter concat number of clips
concat_n += 1
# check if frames are missing at the end
if (media_duration < frame_end):
# recalculate timing
empty_end_dur = float(frame_end - media_duration + 1) / fps
duration_sec = float(media_duration - frame_start) / fps
# define ending empty video concat variables
video_empty_end = (
"[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
).format(**locals())
video_layer_end = "[gv1]"
if audio_check_output:
# define ending empty audio concat variables
audio_empty_end = (
"[0]atrim=duration={empty_end_dur}[ga1];"
).format(**locals())
audio_layer_end = "[ga0]"
# alter concat number of clips
concat_n += 1
# concatenating the black frames together
output_args.append((
"-filter_complex \""
"{audio_empty_start}"
"{video_empty_start}"
"{audio_empty_end}"
"{video_empty_end}"
"{video_layer_start}{audio_layer_start}[1:v]{audio_input}"
"{video_layer_end}{audio_layer_end}"
"concat=n={concat_n}:v=1{audio_output}\""
).format(**locals()))
# append ffmpeg input video clip
input_args.append("-ss {:0.2f}".format(start_sec))
input_args.append("-t {:0.2f}".format(duration_sec))
input_args.append("-i {}".format(full_input_path))
# copy the source video codec when only shortening the clip
if ("_cut-bigger" in tags) and (not empty_add):
output_args.append("-c:v copy")
# make sure there is no frame-to-frame compression
output_args.append("-intra")
# output filename
output_args.append("-y")
output_args.append(full_output_path)
mov_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
repre_new = {
"files": new_file_name,
"stagingDir": full_output_dir,
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": fps,
"name": "cut_up_preview",
"tags": ["review", "delete"] + self.tags_addition,
"ext": ext,
"anatomy_template": "publish"
}
representations_new.append(repre_new)
# iterate over a copy so removing items does not skip elements
for repre in list(representations_new):
if ("delete" in repre.get("tags", [])) and (
"cut_up_preview" not in repre["name"]):
representations_new.remove(repre)
self.log.debug(
"Representations: {}".format(representations_new))
instance.data["representations"] = representations_new

View file

@ -46,7 +46,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
in_data = json.load(f)
asset_name = in_data["asset"]
family_preset_key = in_data.get("family_preset_key", "")
family = in_data["family"]
subset = in_data["subset"]
@ -57,15 +56,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
presets = config.get_presets()
# Get from presets anatomy key that will be used for getting template
# - default integrate new is used if not set
anatomy_key = (
presets.get("standalone_publish", {})
.get("families", {})
.get(family_preset_key, {})
.get("anatomy_template")
)
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset", "name": asset_name})
context.data["project"] = project
@ -98,12 +88,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
instance.data["source"] = "standalone publisher"
for component in in_data["representations"]:
component["destination"] = component["files"]
component["stagingDir"] = component["stagingDir"]
# Do not set anatomy_template if not specified
if anatomy_key:
component["anatomy_template"] = anatomy_key
if isinstance(component["files"], list):
collections, remainder = clique.assemble(component["files"])
self.log.debug("collecting sequence: {}".format(collections))

View file

@ -20,7 +20,7 @@ FFMPEG = (
).format(ffmpeg_path)
FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams %(source)s'
'{} -v quiet -print_format json -show_format -show_streams "%(source)s"'
).format(ffprobe_path)
DRAWTEXT = (
@ -55,7 +55,7 @@ def _streams(source):
def get_fps(str_value):
if str_value == "0/0":
print("Source has \"r_frame_rate\" value set to \"0/0\".")
log.warning("Source has \"r_frame_rate\" value set to \"0/0\".")
return "Unknown"
items = str_value.split("/")
@ -266,7 +266,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
:returns: completed command
:rtype: str
"""
output = output or ''
output = '"{}"'.format(output or '')
if overwrite:
output = '-y {}'.format(output)
@ -300,10 +300,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
args=args,
overwrite=overwrite
)
# print(command)
log.info("Launching command: {}".format(command))
proc = subprocess.Popen(command, shell=True)
proc.communicate()
log.info(proc.communicate()[0])
if proc.returncode != 0:
raise RuntimeError("Failed to render '%s': %s'"
% (output, command))
@ -335,22 +335,23 @@ def example(input_path, output_path):
def burnins_from_data(
input_path, output_path, data, codec_data=None, overwrite=True
input_path, output_path, data,
codec_data=None, options=None, burnin_values=None, overwrite=True
):
'''
This method adds burnins to video/image file based on presets setting.
"""This method adds burnins to video/image file based on presets setting.
Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
:param input_path: full path to input file where burnins should be add
:type input_path: str
:param codec_data: all codec related arguments in list
:param codec_data: list
:param output_path: full path to output file where output will be rendered
:type output_path: str
:param data: data required for burnin settings (more info below)
:type data: dict
:param overwrite: output will be overriden if already exists, defaults to True
:type overwrite: bool
Args:
input_path (str): Full path to input file where burnins should be add.
output_path (str): Full path to output file where output will be
rendered.
data (dict): Data required for burnin settings (more info below).
codec_data (list): All codec related arguments in list.
options (dict): Options for burnins.
burnin_values (dict): Contain positioned values.
overwrite (bool): Output will be overridden if already exists,
True by default.
Presets must be set separately. Should be dict with 2 keys:
- "options" - sets look of burnins - colors, opacity,...(more info: ModifiedBurnins doc)
@ -391,11 +392,15 @@ def burnins_from_data(
"frame_start_tc": 1,
"shot": "sh0010"
}
'''
presets = config.get_presets().get('tools', {}).get('burnins', {})
options_init = presets.get('options')
"""
burnin = ModifiedBurnins(input_path, options_init=options_init)
# Use legacy processing when options are not set
if options is None or burnin_values is None:
presets = config.get_presets().get("tools", {}).get("burnins", {})
options = presets.get("options")
burnin_values = presets.get("burnins") or {}
burnin = ModifiedBurnins(input_path, options_init=options)
frame_start = data.get("frame_start")
frame_end = data.get("frame_end")
@ -425,7 +430,7 @@ def burnins_from_data(
if source_timecode is not None:
data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY
for align_text, value in presets.get('burnins', {}).items():
for align_text, value in burnin_values.items():
if not value:
continue
@ -504,18 +509,39 @@ def burnins_from_data(
text = value.format(**data)
burnin.add_text(text, align, frame_start, frame_end)
codec_args = ""
ffmpeg_args = []
if codec_data:
codec_args = " ".join(codec_data)
# Use codec definition from method arguments
ffmpeg_args = codec_data
burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
else:
codec_name = burnin._streams[0].get("codec_name")
if codec_name:
ffmpeg_args.append("-codec:v {}".format(codec_name))
profile_name = burnin._streams[0].get("profile")
if profile_name:
# lowercase the profile name and replace spaces with underscores
profile_name = profile_name.replace(" ", "_").lower()
ffmpeg_args.append("-profile:v {}".format(profile_name))
pix_fmt = burnin._streams[0].get("pix_fmt")
if pix_fmt:
ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))
ffmpeg_args_str = " ".join(ffmpeg_args)
burnin.render(
output_path, args=ffmpeg_args_str, overwrite=overwrite, **data
)
if __name__ == '__main__':
if __name__ == "__main__":
in_data = json.loads(sys.argv[-1])
burnins_from_data(
in_data['input'],
in_data['output'],
in_data['burnin_data'],
in_data['codec']
in_data["input"],
in_data["output"],
in_data["burnin_data"],
codec_data=in_data.get("codec"),
options=in_data.get("options"),
burnin_values=in_data.get("values")
)
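A hypothetical JSON payload for this entry point might look as follows (the "codec", "options" and "values" contents are illustrative examples; omitting them falls back to presets):

import json

in_data = {
    "input": "/path/source.mov",
    "output": "/path/source_burnin.mov",
    "burnin_data": {"frame_start": 1001, "frame_end": 1100, "shot": "sh0010"},
    "codec": ["-codec:v prores_ks"],   # assumed example codec args
    "options": {"opacity": 0.5},       # assumed example option
    "values": {"TOP_LEFT": "{shot}"},  # assumed positioned value
}
# Passed as the last command-line argument to the script.
print(json.dumps(in_data))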

View file

@ -14,9 +14,9 @@ def __main__():
"configuration.")
kwargs, args = parser.parse_known_args()
pype_root = os.environ.get("PYPE_ROOT")
pype_root = os.environ.get("PYPE_SETUP_PATH")
if not pype_root:
raise Exception("PYPE_ROOT is not set")
raise Exception("PYPE_SETUP_PATH is not set")
# TODO: set correct path
pype_command = "pype.ps1"

View file

@ -47,10 +47,10 @@ def __main__():
auto_pype_root = os.path.dirname(os.path.abspath(__file__))
auto_pype_root = os.path.abspath(os.path.join(auto_pype_root, "../../../../.."))
auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root
if os.environ.get('PYPE_ROOT'):
auto_pype_root = os.environ.get('PYPE_SETUP_PATH') or auto_pype_root
if os.environ.get('PYPE_SETUP_PATH'):
print("Got Pype location from environment: {}".format(
os.environ.get('PYPE_ROOT')))
os.environ.get('PYPE_SETUP_PATH')))
pype_command = "pype.ps1"
if platform.system().lower() == "linux":
@ -77,11 +77,10 @@ def __main__():
print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa
args = [
os.path.join(pype_root, pype_command),
"publish",
" ".join(paths)
" ".join(['"{}"'.format(p) for p in paths])
]
print("Pype command: {}".format(" ".join(args)))

View file

@ -18,7 +18,7 @@ def main(env):
# Register Host (and it's pyblish plugins)
host_name = env["AVALON_APP"]
# TODO: not sure whether to use "pype." or "avalon." for host import
host_import_str = f"avalon.{host_name}"
host_import_str = f"pype.{host_name}"
try:
host_module = importlib.import_module(host_import_str)

View file

@ -255,10 +255,9 @@ class Handler(http.server.SimpleHTTPRequestHandler):
try:
in_data = json.loads(in_data_str)
except Exception as e:
log.error("Invalid JSON recieved:")
log.error("-" * 80)
log.error(in_data_str)
log.error("-" * 80)
log.error("Invalid JSON recieved: \"{}\"".format(
str(in_data_str)
))
raise Exception("Invalid JSON recieved") from e
request_info = RequestInfo(

View file

@ -46,25 +46,26 @@ class TextureCopy:
return asset
def _get_destination_path(self, asset, project):
root = api.registered_root()
PROJECT = api.Session["AVALON_PROJECT"]
project_name = project["name"]
hierarchy = ""
parents = asset['data']['parents']
if parents and len(parents) > 0:
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"asset": asset['name'],
"family": 'texture',
"subset": 'Main',
"hierarchy": hierarchy}
anatomy = Anatomy()
anatomy_filled = os.path.normpath(
anatomy.format(template_data)['texture']['path'])
return anatomy_filled
template_data = {
"project": {
"name": project_name,
"code": project['data']['code']
},
"silo": asset.get('silo'),
"asset": asset['name'],
"family": 'texture',
"subset": 'Main',
"hierarchy": hierarchy
}
anatomy = Anatomy(project_name)
anatomy_filled = anatomy.format(template_data)
return anatomy_filled['texture']['path']
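A sketch of what the template fill amounts to, with a made-up 'texture' template and plain str.format standing in for Anatomy (which resolves roots and templates from presets):

# Illustrative only: the real template comes from the project anatomy.
template = "{project[code]}/{hierarchy}/{asset}/texture/{subset}"
template_data = {
    "project": {"name": "MyProject", "code": "mp"},
    "silo": "assets",
    "asset": "hero",
    "family": "texture",
    "subset": "Main",
    "hierarchy": "characters/hero",
}
print(template.format(**template_data))
# mp/characters/hero/hero/texture/Main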
def _get_version(self, path):
versions = [0]

View file

@ -1 +1 @@
__version__ = "2.8.0"
__version__ = "2.9.0"

BIN
res/app_icons/harmony.png Normal file

Binary file not shown.

After

Width: | Height: | Size: 29 KiB

View file

@ -8,7 +8,6 @@
"additionalProperties": false,
"required": [
"template",
"tasks",
"apps"
],

View file

@ -9,7 +9,6 @@
"additionalProperties": true,
"required": [
"AVALON_PROJECTS",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_CONFIG"