Merge branch 'develop' into feature/rv_improvements

This commit is contained in:
Toke Jepsen 2019-07-24 23:04:45 +01:00
commit 0f058cbcf3
84 changed files with 2325 additions and 1740 deletions

View file

@@ -7,11 +7,6 @@ from .lib import filter_pyblish_plugins
import logging
log = logging.getLogger(__name__)
# # do not delete, these are mandatory
Anatomy = None
Dataflow = None
Colorspace = None
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -26,6 +21,7 @@ def install():
pyblish.register_discovery_filter(filter_pyblish_plugins)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
def uninstall():
log.info("Deregistering global plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)

View file

@@ -18,15 +18,8 @@ from .action import (
from pypeapp import Logger
from . import (
Anatomy,
Colorspace,
Dataflow
)
from .templates import (
load_data_from_templates,
reset_data_from_templates,
get_project_name,
get_project_code,
get_hierarchy,
@@ -40,6 +33,7 @@ from .templates import (
)
from .lib import (
version_up,
get_handle_irregular,
get_project_data,
get_asset_data,
@@ -65,11 +59,6 @@ __all__ = [
"ValidationException",
# contextual templates
# get data to preloaded templates
"load_data_from_templates",
"reset_data_from_templates",
# get contextual data
"get_handle_irregular",
"get_project_data",
@@ -89,9 +78,4 @@ __all__ = [
"get_data_hierarchical_attr",
"get_avalon_project_template",
# preloaded templates
"Anatomy",
"Colorspace",
"Dataflow",
]

View file

@@ -50,9 +50,6 @@ def install():
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
# load data from templates
api.load_data_from_templates()
# launch pico server
pico_server_launch()

View file

@@ -1,10 +1,12 @@
from pype import api as pype
from pypeapp import Anatomy, config
log = pype.Logger().get_logger(__name__, "aport")
def get_anatomy(**kwarg):
return pype.Anatomy
return Anatomy()
def get_dataflow(**kwarg):
@@ -15,7 +17,8 @@ def get_dataflow(**kwarg):
assert all([host, cls]), log.error("aport.templates.get_dataflow():"
"Missing mandatory kwargs `host`, `cls`")
aport_dataflow = getattr(pype.Dataflow, str(host), None)
presets = config.get_init_presets()
aport_dataflow = getattr(presets["dataflow"], str(host), None)
aport_dataflow_node = getattr(aport_dataflow.nodes, str(cls), None)
if preset:
aport_dataflow_node = getattr(aport_dataflow_node, str(preset), None)
@@ -32,7 +35,8 @@ def get_colorspace(**kwarg):
assert all([host, cls]), log.error("aport.templates.get_colorspace():"
"Missing mandatory kwargs `host`, `cls`")
aport_colorspace = getattr(pype.Colorspace, str(host), None)
presets = config.get_init_presets()
aport_colorspace = getattr(presets["colorspace"], str(host), None)
aport_colorspace_node = getattr(aport_colorspace, str(cls), None)
if preset:
aport_colorspace_node = getattr(aport_colorspace_node, str(preset), None)
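For reference, a minimal usage sketch of the preset-backed lookups above; the host and class values are illustrative, not from this commit:

# Hypothetical call sites: both helpers now resolve presets loaded via
# pypeapp.config.get_init_presets() instead of the removed
# pype.Dataflow / pype.Colorspace globals.
dataflow_node = get_dataflow(host="nuke", cls="write")
colorspace_node = get_colorspace(host="nuke", cls="write")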

View file

@@ -258,8 +258,8 @@ class CustomAttributes(BaseAction):
):
continue
if 'is_hierarchical' in data:
if data['is_hierarchical'] == attr['is_hierarchical']:
if data.get('is_hierarchical', False) is True:
if attr['is_hierarchical'] is True:
matching.append(attr)
elif 'object_type_id' in data:
if (
@@ -453,6 +453,8 @@ class CustomAttributes(BaseAction):
def get_default(self, attr):
type = attr['type']
default = attr['default']
if default is None:
return default
err_msg = 'Default value is not'
if type == 'number':
if not isinstance(default, (float, int)):
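A minimal standalone sketch of the guard added to get_default() above; the helper name is illustrative:

def default_or_validate(attr):
    # None defaults short-circuit before the per-type validation,
    # mirroring the early return added in the hunk above.
    default = attr['default']
    if default is None:
        return default
    if attr['type'] == 'number' and not isinstance(default, (float, int)):
        raise TypeError('Default value is not a number: {}'.format(default))
    return default

assert default_or_validate({'type': 'number', 'default': None}) is None
assert default_or_validate({'type': 'number', 'default': 5}) == 5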

View file

@@ -0,0 +1,334 @@
import os
import sys
import json
import argparse
import logging
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
class CustomAttributeDoctor(BaseAction):
#: Action identifier.
identifier = 'custom.attributes.doctor'
#: Action label.
label = 'Custom Attributes Doctor'
#: Action description.
description = (
'Fix hierarchical custom attributes; mainly handles, fstart'
' and fend'
)
icon = '{}/ftrack/action_icons/TestAction.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
hierarchical_ca = ['handle_start', 'handle_end', 'fstart', 'fend']
hierarchical_alternatives = {
'handle_start': 'handles',
'handle_end': 'handles'
}
# Roles for new custom attributes
read_roles = ['ALL',]
write_roles = ['ALL',]
data_ca = {
'handle_start': {
'label': 'Frame handles start',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
'handle_end': {
'label': 'Frame handles end',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
'fstart': {
'label': 'Frame start',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
'fend': {
'label': 'Frame end',
'type': 'number',
'config': json.dumps({'isdecimal': False})
}
}
def discover(self, session, entities, event):
''' Validation '''
return True
def interface(self, session, entities, event):
if event['data'].get('values', {}):
return
title = 'Select Project to fix Custom attributes'
items = []
item_splitter = {'type': 'label', 'value': '---'}
all_projects = session.query('Project').all()
for project in all_projects:
item_label = {
'type': 'label',
'value': '{} (<i>{}</i>)'.format(
project['full_name'], project['name']
)
}
item = {
'name': project['id'],
'type': 'boolean',
'value': False
}
if len(items) > 0:
items.append(item_splitter)
items.append(item_label)
items.append(item)
if len(items) == 0:
return {
'success': False,
'message': 'Didn\'t find any projects'
}
else:
return {
'items': items,
'title': title
}
def launch(self, session, entities, event):
if 'values' not in event['data']:
return
values = event['data']['values']
projects_to_update = []
for project_id, update_bool in values.items():
if not update_bool:
continue
project = session.query(
'Project where id is "{}"'.format(project_id)
).one()
projects_to_update.append(project)
if not projects_to_update:
self.log.debug('Nothing to update')
return {
'success': True,
'message': 'Nothing to update'
}
self.security_roles = {}
self.to_process = {}
# self.curent_default_values = {}
existing_attrs = session.query('CustomAttributeConfiguration').all()
self.prepare_custom_attributes(existing_attrs)
self.projects_data = {}
for project in projects_to_update:
self.process_data(project)
return True
def process_data(self, entity):
cust_attrs = entity.get('custom_attributes')
if not cust_attrs:
return
for dst_key, src_key in self.to_process.items():
if src_key in cust_attrs:
value = cust_attrs[src_key]
entity['custom_attributes'][dst_key] = value
self.session.commit()
for child in entity.get('children', []):
self.process_data(child)
def prepare_custom_attributes(self, existing_attrs):
to_process = {}
to_create = []
all_keys = {attr['key']: attr for attr in existing_attrs}
for key in self.hierarchical_ca:
if key not in all_keys:
self.log.debug(
'Custom attribute "{}" does not exist at all'.format(key)
)
to_create.append(key)
if key in self.hierarchical_alternatives:
alt_key = self.hierarchical_alternatives[key]
if alt_key in all_keys:
self.log.debug((
'Custom attribute "{}" will use values from "{}"'
).format(key, alt_key))
to_process[key] = alt_key
obj = all_keys[alt_key]
# if alt_key not in self.curent_default_values:
# self.curent_default_values[alt_key] = obj['default']
obj['default'] = None
self.session.commit()
else:
obj = all_keys[key]
new_key = key + '_old'
if obj['is_hierarchical']:
if new_key not in all_keys:
self.log.info((
'Custom attribute "{}" is already hierarchical'
' and no old variant was found'
).format(key)
)
continue
to_process[key] = new_key
continue
# default_value = obj['default']
# if new_key not in self.curent_default_values:
# self.curent_default_values[new_key] = default_value
obj['key'] = new_key
obj['label'] = obj['label'] + '(old)'
obj['default'] = None
self.session.commit()
to_create.append(key)
to_process[key] = new_key
self.to_process = to_process
for key in to_create:
data = {
'key': key,
'entity_type': 'show',
'is_hierarchical': True,
'default': None
}
for _key, _value in self.data_ca.get(key, {}).items():
if _key == 'type':
_value = self.session.query((
'CustomAttributeType where name is "{}"'
).format(_value)).first()
data[_key] = _value
avalon_group = self.session.query(
'CustomAttributeGroup where name is "avalon"'
).first()
if avalon_group:
data['group'] = avalon_group
read_roles = self.get_security_role(self.read_roles)
write_roles = self.get_security_role(self.write_roles)
data['read_security_roles'] = read_roles
data['write_security_roles'] = write_roles
self.session.create('CustomAttributeConfiguration', data)
self.session.commit()
# def return_back_defaults(self):
# existing_attrs = self.session.query(
# 'CustomAttributeConfiguration'
# ).all()
#
# for attr_key, default in self.curent_default_values.items():
# for attr in existing_attrs:
# if attr['key'] != attr_key:
# continue
# attr['default'] = default
# self.session.commit()
# break
def get_security_role(self, security_roles):
roles = []
if len(security_roles) == 0 or security_roles[0] == 'ALL':
roles = self.get_role_ALL()
elif security_roles[0] == 'except':
excepts = security_roles[1:]
all = self.get_role_ALL()
for role in all:
if role['name'] not in excepts:
roles.append(role)
if role['name'] not in self.security_roles:
self.security_roles[role['name']] = role
else:
for role_name in security_roles:
if role_name in self.security_roles:
roles.append(self.security_roles[role_name])
continue
try:
query = 'SecurityRole where name is "{}"'.format(role_name)
role = self.session.query(query).one()
self.security_roles[role_name] = role
roles.append(role)
except Exception:
self.log.warning(
'Security role "{}" does not exist'.format(role_name)
)
continue
return roles
def get_role_ALL(self):
role_name = 'ALL'
if role_name in self.security_roles:
all_roles = self.security_roles[role_name]
else:
all_roles = self.session.query('SecurityRole').all()
self.security_roles[role_name] = all_roles
for role in all_roles:
if role['name'] not in self.security_roles:
self.security_roles[role['name']] = role
return all_roles
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
CustomAttributeDoctor(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
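prepare_custom_attributes() above ends up with a destination-to-source mapping that process_data() then copies down the entity hierarchy. A sketch of its shape; the *_old keys assume the rename branch ran:

# dst hierarchical key -> src key holding the values to migrate
to_process = {
    'handle_start': 'handles',   # via hierarchical_alternatives
    'handle_end': 'handles',
    'fstart': 'fstart_old',      # hypothetical renamed original
    'fend': 'fend_old',
}
for dst_key, src_key in to_process.items():
    print('{} <- {}'.format(dst_key, src_key))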

View file

@@ -1,705 +0,0 @@
import os
import sys
import argparse
import json
import logging
import collections
import tempfile
import requests
from pype.vendor import clique  # needed by clique.parse() below
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pypeapp import config
class SyncAssetVersions(BaseAction):
#: Action identifier.
identifier = 'sync.asset.versions'
#: Action label.
label = 'Sync Asset Versions'
#: Action description.
description = 'Synchronize Asset versions to another Ftrack'
#: roles that are allowed to register this action
role_list = ['Administrator', 'Project Manager', 'Pypeclub']
# ENTER VALUES HERE (change values based on keys)
# Custom attribute storing ftrack id of destination server
id_key_src = 'fridge_ftrackID'
# Custom attribute storing ftrack id of source server
id_key_dst = 'kredenc_ftrackID'
components_name = (
'ftrackreview-mp4_src',
'ftrackreview-image_src',
'thumbnail_src'
)
# comp name mapping
comp_name_mapping = {
'ftrackreview-mp4_src': 'ftrackreview-mp4',
'ftrackreview-image_src': 'ftrackreview-image',
'thumbnail_src': 'thumbnail'
}
comp_location_mapping = {
'ftrack.server': [
'ftrackreview-mp4',
'ftrackreview-mp4_src',
'ftrackreview-image',
'ftrackreview-image_src',
'thumbnail',
'thumbnail_src'
],
'ftrack.unmanaged': []
}
def discover(self, session, entities, event):
''' Validation '''
for entity in entities:
if entity.entity_type.lower() != 'assetversion':
return False
return True
def launch(self, session, entities, event):
self.dst_ftrack_locations = {}
self.interface_messages = {}
# stop if custom attribute for storing second ftrack id is missing
if self.id_key_src not in entities[0]['custom_attributes']:
msg = (
'Custom attribute "{}" does not exist on AssetVersion'
).format(self.id_key_src)
self.log.error(msg)
return {
'success': False,
'message': msg
}
source_credentials = config.get_presets()['ftrack'].get(
'partnership_ftrack_cred', {}
)
self.dst_session = ftrack_api.Session(
server_url=source_credentials.get('server_url'),
api_key=source_credentials.get('api_key'),
api_user=source_credentials.get('api_user'),
auto_connect_event_hub=True
)
# NOTE Shared session has issues with location definition
self.session_for_components = ftrack_api.Session(
server_url=session.server_url,
api_key=session.api_key,
api_user=session.api_user,
auto_connect_event_hub=True
)
for entity in entities:
asset = entity['asset']
parent = asset['parent']
# Check if asset version already has entity on destination Ftrack
# TODO ? skip if yes
# ? show to user - with interface/message/note
# + or ask if user want to override found version ????
dst_ftrack_id = entity['custom_attributes'].get(self.id_key_src)
if dst_ftrack_id:
dst_ftrack_ent = self.dst_session.query(
'AssetVersion where id = "{}"'.format(dst_ftrack_id)
).first()
if dst_ftrack_ent:
self.log.warning(
'"{}" - Already exists. Skipping'.format(asset['name'])
)
continue
# Find parent where Version will be uploaded
dst_parent_id = parent['custom_attributes'].get(self.id_key_src)
if not dst_parent_id:
self.log.warning((
'Entity: "{}" don\'t have stored Custom attribute "{}"'
).format(parent['name'], self.id_key_src))
continue
dst_parent_entity = self.dst_session.query(
'TypedContext where id = "{}"'.format(dst_parent_id)
).first()
if not dst_parent_entity:
msg = (
'Didn\'t find mirrored entity in destination Ftrack'
' for "{}"'
).format(parent['name'])
self.log.warning(msg)
continue
component_list = self.prepare_data(entity['id'])
id_stored = False
for comp_data in component_list:
dst_asset_ver_id = self.asset_version_creation(
dst_parent_entity, comp_data, entity
)
if id_stored:
continue
entity['custom_attributes'][self.id_key_src] = dst_asset_ver_id
session.commit()
id_stored = True
self.dst_session.close()
self.session_for_components.close()
self.dst_session = None
self.session_for_components = None
return True
def prepare_data(self, asset_version_id):
components_list = []
asset_version = self.session_for_components.query(
'AssetVersion where id is "{}"'.format(asset_version_id)
).one()
# Asset data
asset_type = asset_version['asset']['type'].get('short', 'upload')
assettype_data = {'short': asset_type}
asset_data = {'name': asset_version['asset']['name']}
# Asset version data
assetversion_data = {'version': asset_version['version']}
# Component data
components_of_interest = {}
for name in self.components_name:
components_of_interest[name] = False
for key in components_of_interest:
# Find component by name
for comp in asset_version['components']:
if comp['name'] == key:
components_of_interest[key] = True
break
# NOTE if component was found then continue
if components_of_interest[key]:
continue
# Look for alternative component name set in mapping
new_key = None
if key in self.comp_name_mapping:
new_key = self.comp_name_mapping[key]
if not new_key:
self.log.warning(
'Asset version does not have components "{}" or "{}"'.format(
key, new_key
)
)
continue
components_of_interest[new_key] = components_of_interest.pop(key)
# Try to look for alternative name
for comp in asset_version['components']:
if comp['name'] == new_key:
components_of_interest[new_key] = True
break
# Check if at least one component is transferable
have_comp_to_transfer = False
for value in components_of_interest.values():
if value:
have_comp_to_transfer = True
break
if not have_comp_to_transfer:
return components_list
thumbnail_id = asset_version.get('thumbnail_id')
temp_folder = tempfile.mkdtemp('components')
# Data for transfer components
for comp in asset_version['components']:
comp_name = comp['name']
if comp_name not in components_of_interest:
continue
if not components_of_interest[comp_name]:
continue
if comp_name in self.comp_name_mapping:
comp_name = self.comp_name_mapping[comp_name]
is_thumbnail = False
for _comp in asset_version['components']:
if _comp['name'] == comp_name:
if _comp['id'] == thumbnail_id:
is_thumbnail = True
break
location_name = comp['component_locations'][0]['location']['name']
location = self.session_for_components.query(
'Location where name is "{}"'.format(location_name)
).one()
file_path = None
if location_name == 'ftrack.unmanaged':
file_path = ''
try:
file_path = location.get_filesystem_path(comp)
except Exception:
pass
file_path = os.path.normpath(file_path)
if not os.path.exists(file_path):
file_path = comp['component_locations'][0][
'resource_identifier'
]
file_path = os.path.normpath(file_path)
if not os.path.exists(file_path):
self.log.warning(
'In component: "{}" can\'t access filepath: "{}"'.format(
comp['name'], file_path
)
)
continue
elif location_name == 'ftrack.server':
download_url = location.get_url(comp)
file_name = '{}{}{}'.format(
asset_version['asset']['name'],
comp_name,
comp['file_type']
)
file_path = os.path.sep.join([temp_folder, file_name])
self.download_file(download_url, file_path)
if not file_path:
self.log.warning(
'In component: "{}" is invalid file path'.format(
comp['name']
)
)
continue
# Default location name value is ftrack.unmanaged
location_name = 'ftrack.unmanaged'
# Try to find location where component will be created
for name, keys in self.comp_location_mapping.items():
if comp_name in keys:
location_name = name
break
dst_location = self.get_dst_location(location_name)
# Metadata
metadata = {}
metadata.update(comp.get('metadata', {}))
component_data = {
"name": comp_name,
"metadata": metadata
}
data = {
'assettype_data': assettype_data,
'asset_data': asset_data,
'assetversion_data': assetversion_data,
'component_data': component_data,
'component_overwrite': False,
'thumbnail': is_thumbnail,
'component_location': dst_location,
'component_path': file_path
}
components_list.append(data)
return components_list
def asset_version_creation(self, dst_parent_entity, data, src_entity):
assettype_data = data['assettype_data']
self.log.debug("data: {}".format(data))
assettype_entity = self.dst_session.query(
self.query("AssetType", assettype_data)
).first()
# Create a new entity if none exists.
if not assettype_entity:
assettype_entity = self.dst_session.create(
"AssetType", assettype_data
)
self.dst_session.commit()
self.log.debug(
"Created new AssetType with data: ".format(assettype_data)
)
# Asset
# Get existing entity.
asset_data = {
"name": src_entity['asset']['name'],
"type": assettype_entity,
"parent": dst_parent_entity
}
asset_data.update(data.get("asset_data", {}))
asset_entity = self.dst_session.query(
self.query("Asset", asset_data)
).first()
self.log.info("asset entity: {}".format(asset_entity))
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
asset_metadata = asset_data.pop("metadata", {})
# Create a new entity if none exists.
info_msg = (
'Created new {entity_type} with data: {data}'
", metadata: {metadata}."
)
if not asset_entity:
asset_entity = self.dst_session.create("Asset", asset_data)
self.dst_session.commit()
self.log.debug(
info_msg.format(
entity_type="Asset",
data=asset_data,
metadata=asset_metadata
)
)
# Adding metadata
existing_asset_metadata = asset_entity["metadata"]
existing_asset_metadata.update(asset_metadata)
asset_entity["metadata"] = existing_asset_metadata
# AssetVersion
assetversion_data = {
'version': 0,
'asset': asset_entity
}
# NOTE task is skipped (can't be identified in other ftrack)
# if task:
# assetversion_data['task'] = task
# NOTE assetversion_data contains version number which is not correct
assetversion_data.update(data.get("assetversion_data", {}))
assetversion_entity = self.dst_session.query(
self.query("AssetVersion", assetversion_data)
).first()
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
assetversion_metadata = assetversion_data.pop("metadata", {})
# Create a new entity if none exists.
if not assetversion_entity:
assetversion_entity = self.dst_session.create(
"AssetVersion", assetversion_data
)
self.dst_session.commit()
self.log.debug(
info_msg.format(
entity_type="AssetVersion",
data=assetversion_data,
metadata=assetversion_metadata
)
)
# Check if custom attribute of main Ftrack can be set
if self.id_key_dst not in assetversion_entity['custom_attributes']:
self.log.warning((
'Destination Asset Version does not have key "{}" in'
' Custom attributes'
).format(self.id_key_dst))
return
assetversion_entity['custom_attributes'][self.id_key_dst] = src_entity['id']
# Adding metadata
existing_assetversion_metadata = assetversion_entity["metadata"]
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
# Have to commit the version and asset, because location can't
# determine the final location without.
self.dst_session.commit()
# Component
# Get existing entity.
component_data = {
"name": "main",
"version": assetversion_entity
}
component_data.update(data.get("component_data", {}))
component_entity = self.dst_session.query(
self.query("Component", component_data)
).first()
component_overwrite = data.get("component_overwrite", False)
location = None
location_name = data.get("component_location", {}).get('name')
if location_name:
location = self.dst_session.query(
'Location where name is "{}"'.format(location_name)
).first()
if not location:
location = self.dst_session.pick_location()
# Overwrite existing component data if requested.
if component_entity and component_overwrite:
origin_location = self.dst_session.query(
'Location where name is "ftrack.origin"'
).one()
# Removing existing members from location
components = list(component_entity.get("members", []))
components += [component_entity,]
for component in components:
for loc in component["component_locations"]:
if location["id"] == loc["location_id"]:
location.remove_component(
component, recursive=False
)
# Deleting existing members on component entity
for member in component_entity.get("members", []):
self.dst_session.delete(member)
del(member)
self.dst_session.commit()
# Reset members in memory
if "members" in component_entity.keys():
component_entity["members"] = []
# Add components to origin location
try:
collection = clique.parse(data["component_path"])
except ValueError:
# Assume its a single file
# Changing file type
name, ext = os.path.splitext(data["component_path"])
component_entity["file_type"] = ext
origin_location.add_component(
component_entity, data["component_path"]
)
else:
# Changing file type
component_entity["file_type"] = collection.format("{tail}")
# Create member components for sequence.
for member_path in collection:
size = 0
try:
size = os.path.getsize(member_path)
except OSError:
pass
name = collection.match(member_path).group("index")
member_data = {
"name": name,
"container": component_entity,
"size": size,
"file_type": os.path.splitext(member_path)[-1]
}
component = self.dst_session.create(
"FileComponent", member_data
)
origin_location.add_component(
component, member_path, recursive=False
)
component_entity["members"].append(component)
# Add components to location.
location.add_component(
component_entity, origin_location, recursive=True
)
data["component"] = component_entity
msg = "Overwriting Component with path: {0}, data: {1}, "
msg += "location: {2}"
self.log.info(
msg.format(
data["component_path"],
component_data,
location
)
)
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
component_metadata = component_data.pop("metadata", {})
# Create new component if none exists.
new_component = False
if not component_entity:
component_entity = assetversion_entity.create_component(
data["component_path"],
data=component_data,
location=location
)
data["component"] = component_entity
msg = (
"Created new Component with path: {}, data: {}"
", metadata: {}, location: {}"
)
self.log.info(msg.format(
data["component_path"],
component_data,
component_metadata,
location['name']
))
new_component = True
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
# if component_data['name'] == 'ftrackreview-mp4':
# assetversion_entity["thumbnail_id"]
# Setting assetversion thumbnail
if data.get("thumbnail", False):
assetversion_entity["thumbnail_id"] = component_entity["id"]
# Inform user about no changes to the database.
if (
component_entity and
not component_overwrite and
not new_component
):
data["component"] = component_entity
self.log.info(
"Found existing component, and no request to overwrite. "
"Nothing has been changed."
)
return
# Commit changes.
self.dst_session.commit()
return assetversion_entity['id']
def query(self, entitytype, data):
""" Generate a query expression from data supplied.
If a value is not a string, we'll add the id of the entity to the
query.
Args:
entitytype (str): The type of entity to query.
data (dict): The data to identify the entity.
Returns:
str: String query to use with "session.query"
"""
queries = []
if sys.version_info[0] < 3:
for key, value in data.iteritems():
if not isinstance(value, (basestring, int)):
self.log.info("value: {}".format(value))
if "id" in value.keys():
queries.append(
"{0}.id is \"{1}\"".format(key, value["id"])
)
else:
queries.append("{0} is \"{1}\"".format(key, value))
else:
for key, value in data.items():
if not isinstance(value, (str, int)):
self.log.info("value: {}".format(value))
if "id" in value.keys():
queries.append(
"{0}.id is \"{1}\"".format(key, value["id"])
)
else:
queries.append("{0} is \"{1}\"".format(key, value))
query = (
entitytype + " where " + " and ".join(queries)
)
return query
def download_file(self, url, path):
r = requests.get(url, stream=True).content
with open(path, 'wb') as f:
f.write(r)
def get_dst_location(self, name):
if name in self.dst_ftrack_locations:
return self.dst_ftrack_locations[name]
location = self.dst_session.query(
'Location where name is "{}"'.format(name)
).one()
self.dst_ftrack_locations[name] = location
return location
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
SyncAssetVersions(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
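A condensed Python 3 sketch of the query() helper removed above, with illustrative data:

def build_query(entitytype, data):
    # Values that are not plain strings/ints contribute their 'id'
    # instead, as in the removed helper.
    parts = []
    for key, value in data.items():
        if not isinstance(value, (str, int)):
            key, value = key + '.id', value['id']
        parts.append('{} is "{}"'.format(key, value))
    return entitytype + ' where ' + ' and '.join(parts)

assert build_query('Asset', {'name': 'hero'}) == 'Asset where name is "hero"'
assert (build_query('Asset', {'parent': {'id': 'abc'}})
        == 'Asset where parent.id is "abc"')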

View file

@@ -1,5 +1,6 @@
import os
import sys
import json
import argparse
import logging
import collections
@@ -16,13 +17,13 @@ class SyncHierarchicalAttrs(BaseAction):
ca_mongoid = lib.get_ca_mongoid()
#: Action identifier.
identifier = 'sync.hierarchical.attrs'
identifier = 'sync.hierarchical.attrs.local'
#: Action label.
label = 'Sync hierarchical attributes'
label = 'Sync HierAttrs - Local'
#: Action description.
description = 'Synchronize hierarchical attributes'
#: Icon
icon = '{}/ftrack/action_icons/SyncHierarchicalAttrs.svg'.format(
icon = '{}/ftrack/action_icons/SyncHierarchicalAttrsLocal.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
@@ -33,74 +34,99 @@ class SyncHierarchicalAttrs(BaseAction):
''' Validation '''
for entity in entities:
if (
entity['context_type'].lower() in ('show', 'task') and
entity.get('context_type', '').lower() in ('show', 'task') and
entity.entity_type.lower() != 'task'
):
return True
return False
def launch(self, session, entities, event):
# Collect hierarchical attrs
custom_attributes = {}
all_avalon_attr = session.query(
'CustomAttributeGroup where name is "avalon"'
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
if not cust_attr['is_hierarchical']:
continue
job = session.create('Job', {
'user': user,
'status': 'running',
'data': json.dumps({
'description': 'Sync Hierarchical attributes'
})
})
session.commit()
if cust_attr['default']:
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
).format(cust_attr['label']))
continue
custom_attributes[cust_attr['key']] = cust_attr
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
return {
'success': True,
'message': msg
}
entity = entities[0]
if entity.entity_type.lower() == 'project':
project_name = entity['full_name']
else:
project_name = entity['project']['full_name']
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
for entity in entities:
for key in custom_attributes:
# check if entity has that attribute
if key not in entity['custom_attributes']:
self.log.debug(
'Hierarchical attribute "{}" not found on "{}"'.format(
key, entity.get('name', entity)
)
)
try:
# Collect hierarchical attrs
custom_attributes = {}
all_avalon_attr = session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
self.log.warning(
'Hierarchical attribute "{}" not set on "{}"'.format(
key, entity.get('name', entity)
)
)
if not cust_attr['is_hierarchical']:
continue
self.update_hierarchical_attribute(entity, key, value)
if cust_attr['default']:
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
).format(cust_attr['label']))
continue
self.db_con.uninstall()
custom_attributes[cust_attr['key']] = cust_attr
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
return {
'success': True,
'message': msg
}
entity = entities[0]
if entity.entity_type.lower() == 'project':
project_name = entity['full_name']
else:
project_name = entity['project']['full_name']
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
for entity in entities:
for key in custom_attributes:
# check if entity has that attribute
if key not in entity['custom_attributes']:
self.log.debug(
'Hierarchical attribute "{}" not found on "{}"'.format(
key, entity.get('name', entity)
)
)
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
self.log.warning(
'Hierarchical attribute "{}" not set on "{}"'.format(
key, entity.get('name', entity)
)
)
continue
self.update_hierarchical_attribute(entity, key, value)
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
exc_info=True
)
finally:
self.db_con.uninstall()
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
return True
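The job handling added above follows a create-then-finalize pattern. A condensed sketch, assuming an ftrack session, user and a work callable from the surrounding action:

def run_with_job(session, user, work):
    import json
    # Create a visible job up front so users see the sync running.
    job = session.create('Job', {
        'user': user,
        'status': 'running',
        'data': json.dumps({'description': 'Sync Hierarchical attributes'})
    })
    session.commit()
    try:
        work()
    except Exception:
        pass  # the action logs here with exc_info=True
    finally:
        # Anything still queued/running at this point is marked failed.
        if job['status'] in ('queued', 'running'):
            job['status'] = 'failed'
        session.commit()

Note that in the hunks shown the success path never sets the job to 'done', so even a clean run ends up marked 'failed'.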

View file

@@ -1,230 +0,0 @@
import os
import sys
import time
import datetime
import requests
import tempfile
from pypeapp import config
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.custom_db_connector import DbConnector, ClientSession
class SynchronizeNotes(BaseAction):
#: Action identifier.
identifier = 'sync.notes'
#: Action label.
label = 'Synchronize Notes'
#: Action description.
description = 'Synchronize notes from one Ftrack to another'
#: roles that are allowed to register this action
role_list = ['Administrator', 'Project Manager', 'Pypeclub']
db_con = DbConnector(
mongo_url=os.environ["AVALON_MONGO"],
database_name='notes_database',
table_name='notes_table'
)
id_key_src = 'fridge_ftrackID'
id_key_dst = 'kredenc_ftrackID'
def discover(self, session, entities, event):
''' Validation '''
if len(entities) == 0:
return False
for entity in entities:
if entity.entity_type.lower() != 'assetversion':
return False
return True
def launch(self, session, entities, event):
source_credentials = config.get_presets()['ftrack'].get(
'partnership_ftrack_cred', {}
)
self.session_source = ftrack_api.Session(
server_url=source_credentials.get('server_url'),
api_key=source_credentials.get('api_key'),
api_user=source_credentials.get('api_user'),
auto_connect_event_hub=True
)
self.session_for_components = ftrack_api.Session(
server_url=session.server_url,
api_key=session.api_key,
api_user=session.api_user,
auto_connect_event_hub=True
)
self.user = self.session_for_components.query(
'User where username is "{}"'.format(self.session.api_user)
).one()
self.db_con.install()
missing_id_entities = []
to_sync_data = []
for dst_entity in entities:
# Ignore entities without stored id from second ftrack
from_id = dst_entity['custom_attributes'].get(self.id_key_src)
if not from_id:
missing_id_entities.append(dst_entity.get('name', dst_entity))
continue
to_sync_data.append(
(dst_entity.entity_type, dst_entity['id'], from_id)
)
for entity_type, entity_id, from_id in to_sync_data:
# each tuple carries the source id for its own entity
av_query = 'AssetVersion where id is "{}"'.format(from_id)
src_entity = self.session_source.query(av_query).one()
src_notes = src_entity['notes']
self.sync_notes(src_notes, (entity_type, entity_id))
self.db_con.uninstall()
if missing_id_entities:
self.log.info('Entities without stored second Ftrack ID:')
self.log.info(missing_id_entities)
return True
def sync_notes(self, src_notes, dst_entity_data):
# Sort notes by date time
src_notes = sorted(src_notes, key=lambda note: note['date'])
for src_note in src_notes:
# Find if exists in DB
db_note_entity = self.db_con.find_one({
self.id_key_src: src_note['id']
})
# WARNING: expr `if not db_note_entity:` does not work!
if db_note_entity is None:
# Create note if not found in DB
dst_note_id = self.create_note(
src_note, dst_entity_data
)
# Add references to DB for next sync
item = {
self.id_key_dst: dst_note_id,
self.id_key_src: src_note['id'],
'content': src_note['content'],
'entity_type': 'Note',
'sync_date': str(datetime.date.today())
}
self.db_con.insert_one(item)
else:
dst_note_id = db_note_entity[self.id_key_dst]
replies = src_note.get('replies')
if not replies:
continue
self.sync_notes(replies, ('Note', dst_note_id))
def create_note(self, src_note, dst_entity_data):
# dst_entity_data - tuple(entity type, entity id)
dst_entity = self.session.query(
'{} where id is "{}"'.format(*dst_entity_data)
).one()
is_reply = False
if dst_entity.entity_type.lower() != 'note':
# Category
category = None
cat = src_note['category']
if cat:
cat_name = cat['name']
category = self.session.query(
'NoteCategory where name is "{}"'.format(cat_name)
).first()
new_note = dst_entity.create_note(
src_note['content'], self.user, category=category
)
else:
new_note = dst_entity.create_reply(
src_note['content'], self.user
)
is_reply = True
# QUESTION Should we change date to match source Ftrack?
new_note['date'] = src_note['date']
self.session.commit()
new_note_id = new_note['id']
# Components
if src_note['note_components']:
self.reupload_components(src_note, new_note_id)
# Bug in ftrack_api, when reply is added session must be reset
if is_reply:
self.session.reset()
time.sleep(0.2)
return new_note_id
def reupload_components(self, src_note, dst_note_id):
# Download and collect source components
src_server_location = self.session_source.query(
'Location where name is "ftrack.server"'
).one()
temp_folder = tempfile.mkdtemp('note_components')
# download and store paths to upload
paths_to_upload = []
count = 0
for note_component in src_note['note_components']:
count += 1
download_url = src_server_location.get_url(
note_component['component']
)
file_name = '{}{}{}'.format(
str(src_note['date'].format('YYYYMMDDHHmmss')),
"{:0>3}".format(count),
note_component['component']['file_type']
)
path = os.path.sep.join([temp_folder, file_name])
self.download_file(download_url, path)
paths_to_upload.append(path)
# Create downloaded components and add to note
dst_server_location = self.session_for_components.query(
'Location where name is "ftrack.server"'
).one()
for path in paths_to_upload:
component = self.session_for_components.create_component(
path,
data={'name': 'My file'},
location=dst_server_location
)
# Attach the component to the note.
self.session_for_components.create(
'NoteComponent',
{'component_id': component['id'], 'note_id': dst_note_id}
)
self.session_for_components.commit()
def download_file(self, url, path):
r = requests.get(url, stream=True).content
with open(path, 'wb') as f:
f.write(r)
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
SynchronizeNotes(session).register()
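The DB-backed dedupe above keys every synced note by its source id; a sketch of the record written to notes_table per created note, with illustrative values:

# Shape of the document inserted by sync_notes(); the id keys follow
# id_key_dst / id_key_src above.
item = {
    'kredenc_ftrackID': '<created note id>',
    'fridge_ftrackID': '<source note id>',
    'content': 'Looks good!',
    'entity_type': 'Note',
    'sync_date': '2019-07-24',
}
# find_one({'fridge_ftrackID': src_note_id}) returning None is what
# triggers creation on a later sync run.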

View file

@@ -6,6 +6,7 @@ import json
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib as ftracklib
from pype.vendor.ftrack_api import session as fa_session
class SyncToAvalon(BaseAction):
@@ -176,6 +177,18 @@ class SyncToAvalon(BaseAction):
job['status'] = 'failed'
session.commit()
event = fa_session.ftrack_api.event.base.Event(
topic='ftrack.action.launch',
data=dict(
actionIdentifier='sync.hierarchical.attrs.local',
selection=event['data']['selection']
),
source=dict(
user=event['source']['user']
)
)
session.event_hub.publish(event, on_error='ignore')
if len(message) > 0:
message = "Unable to sync: {}".format(message)
return {

View file

@@ -0,0 +1,274 @@
import os
import sys
import json
import argparse
import logging
import collections
from pypeapp import config
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
class SyncHierarchicalAttrs(BaseAction):
db_con = DbConnector()
ca_mongoid = lib.get_ca_mongoid()
#: Action identifier.
identifier = 'sync.hierarchical.attrs'
#: Action label.
label = 'Sync HierAttrs'
#: Action description.
description = 'Synchronize hierarchical attributes'
#: Icon
icon = '{}/ftrack/action_icons/SyncHierarchicalAttrs.svg'.format(
os.environ.get(
'PYPE_STATICS_SERVER',
'http://localhost:{}'.format(
config.get_presets().get('services', {}).get(
'statics_server', {}
).get('default_port', 8021)
)
)
)
def register(self):
self.session.event_hub.subscribe(
'topic=ftrack.action.discover',
self._discover
)
self.session.event_hub.subscribe(
'topic=ftrack.action.launch and data.actionIdentifier={}'.format(
self.identifier
),
self._launch
)
def discover(self, session, entities, event):
''' Validation '''
role_check = False
discover = False
role_list = ['Pypeclub', 'Administrator', 'Project Manager']
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
for role in user['user_security_roles']:
if role['security_role']['name'] in role_list:
role_check = True
break
print(self.icon)
if role_check is True:
for entity in entities:
context_type = entity.get('context_type', '').lower()
if (
context_type in ('show', 'task') and
entity.entity_type.lower() != 'task'
):
discover = True
break
return discover
def launch(self, session, entities, event):
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
job = session.create('Job', {
'user': user,
'status': 'running',
'data': json.dumps({
'description': 'Sync Hierarchical attributes'
})
})
session.commit()
try:
# Collect hierarchical attrs
custom_attributes = {}
all_avalon_attr = session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
if not cust_attr['is_hierarchical']:
continue
if cust_attr['default']:
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
).format(cust_attr['label']))
continue
custom_attributes[cust_attr['key']] = cust_attr
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
return {
'success': True,
'message': msg
}
entity = entities[0]
if entity.entity_type.lower() == 'project':
project_name = entity['full_name']
else:
project_name = entity['project']['full_name']
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
for entity in entities:
for key in custom_attributes:
# check if entity has that attribute
if key not in entity['custom_attributes']:
self.log.debug(
'Hierarchical attribute "{}" not found on "{}"'.format(
key, entity.get('name', entity)
)
)
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
self.log.warning(
'Hierarchical attribute "{}" not set on "{}"'.format(
key, entity.get('name', entity)
)
)
continue
self.update_hierarchical_attribute(entity, key, value)
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
exc_info=True
)
finally:
self.db_con.uninstall()
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
return True
def get_hierarchical_value(self, key, entity):
value = entity['custom_attributes'][key]
if (
value is not None or
entity.entity_type.lower() == 'project'
):
return value
return self.get_hierarchical_value(key, entity['parent'])
def update_hierarchical_attribute(self, entity, key, value):
if (
entity['context_type'].lower() not in ('show', 'task') or
entity.entity_type.lower() == 'task'
):
return
# collect entity's custom attributes
custom_attributes = entity.get('custom_attributes')
if not custom_attributes:
return
mongoid = custom_attributes.get(self.ca_mongoid)
if not mongoid:
self.log.debug('Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
))
return
try:
mongoid = ObjectId(mongoid)
except Exception:
self.log.warning('Entity "{}" has stored invalid MongoID.'.format(
entity.get('name', entity)
))
return
# Find entity in Mongo DB
mongo_entity = self.db_con.find_one({'_id': mongoid})
if not mongo_entity:
self.log.warning(
'Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
)
)
return
# Change value if entity has set its own
entity_value = custom_attributes[key]
if entity_value is not None:
value = entity_value
data = mongo_entity.get('data') or {}
data[key] = value
self.db_con.update_many(
{'_id': mongoid},
{'$set': {'data': data}}
)
for child in entity.get('children', []):
self.update_hierarchical_attribute(child, key, value)
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
SyncHierarchicalAttrs(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
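get_hierarchical_value() above walks up the parent chain until it finds a non-None value or reaches the project. A standalone equivalent over plain dicts, for illustration only:

def resolve(key, entity):
    # Mirrors the recursion above: own value wins, otherwise ask parent.
    value = entity['custom_attributes'].get(key)
    if value is not None or entity.get('parent') is None:
        return value
    return resolve(key, entity['parent'])

shot = {'custom_attributes': {'fps': None},
        'parent': {'custom_attributes': {'fps': 24}}}
assert resolve('fps', shot) == 24  # inherited from the parent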

View file

@@ -3,8 +3,11 @@ import sys
import argparse
import logging
import json
from pypeapp import config
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
from pype.vendor.ftrack_api import session as fa_session
class Sync_To_Avalon(BaseAction):
@@ -50,7 +53,14 @@ class Sync_To_Avalon(BaseAction):
description = 'Send data from Ftrack to Avalon'
#: Action icon.
icon = '{}/ftrack/action_icons/SyncToAvalon.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
os.environ.get(
'PYPE_STATICS_SERVER',
'http://localhost:{}'.format(
config.get_presets().get('services', {}).get(
'statics_server', {}
).get('default_port', 8021)
)
)
)
def register(self):
@@ -70,7 +80,7 @@ class Sync_To_Avalon(BaseAction):
''' Validation '''
roleCheck = False
discover = False
roleList = ['Administrator', 'Project Manager']
roleList = ['Pypeclub', 'Administrator', 'Project Manager']
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
@@ -191,6 +201,24 @@ class Sync_To_Avalon(BaseAction):
' - Please check Log for more information'
)
finally:
if job['status'] in ['queued', 'running']:
job['status'] = 'failed'
session.commit()
event = fa_session.ftrack_api.event.base.Event(
topic='ftrack.action.launch',
data=dict(
actionIdentifier='sync.hierarchical.attrs',
selection=event['data']['selection']
),
source=dict(
user=event['source']['user']
)
)
session.event_hub.publish(event, on_error='ignore')
if len(message) > 0:
message = "Unable to sync: {}".format(message)
return {

View file

@@ -0,0 +1,239 @@
from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent, lib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
from pypeapp import config
from pypeapp import Anatomy
import subprocess
import os
import re
class UserAssigmentEvent(BaseEvent):
"""
This script intercepts the user assignment / de-assignment event and
runs a shell script, providing as much context as possible.
It expects the configuration file ``presets/ftrack/user_assigment_event.json``.
In it, you define paths to scripts to be run for the user assignment event
and for user de-assignment::
{
"add": [
"/path/to/script1",
"/path/to/script2"
],
"remove": [
"/path/to/script3",
"/path/to/script4"
]
}
Those scripts are executed in a shell. Three arguments are passed
to them:
1) user name of the (de)assigned user
2) path to workfiles of the task the user was (de)assigned to
3) path to publish files of the task the user was (de)assigned to
"""
db_con = DbConnector()
ca_mongoid = lib.get_ca_mongoid()
def error(self, *err):
for e in err:
self.log.error(e)
def _run_script(self, script, args):
"""
Run shell script with arguments as subprocess
:param script: script path
:type script: str
:param args: list of arguments passed to script
:type args: list
:returns: return code
:rtype: int
"""
# pass each argument as its own argv entry
p = subprocess.call([script] + list(args))
return p
def _get_task_and_user(self, session, action, changes):
"""
Get Task and User entities from Ftrack session
:param session: ftrack session
:type session: ftrack_api.session
:param action: event action
:type action: str
:param changes: what was changed by event
:type changes: dict
:returns: User and Task entities
:rtype: tuple
"""
if not changes:
return None, None
if action == 'add':
task_id = changes.get('context_id', {}).get('new')
user_id = changes.get('resource_id', {}).get('new')
elif action == 'remove':
task_id = changes.get('context_id', {}).get('old')
user_id = changes.get('resource_id', {}).get('old')
if not task_id:
return None, None
if not user_id:
return None, None
task = session.query('Task where id is "{}"'.format(task_id)).one()
user = session.query('User where id is "{}"'.format(user_id)).one()
return task, user
def _get_asset(self, task):
"""
Get asset from task entity
:param task: Task entity
:type task: dict
:returns: Asset entity
:rtype: dict
"""
parent = task['parent']
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = task['project']['full_name']
avalon_entity = None
parent_id = parent['custom_attributes'].get(self.ca_mongoid)
if parent_id:
parent_id = ObjectId(parent_id)
avalon_entity = self.db_con.find_one({
'_id': parent_id,
'type': 'asset'
})
if not avalon_entity:
avalon_entity = self.db_con.find_one({
'type': 'asset',
'name': parent['name']
})
if not avalon_entity:
self.db_con.uninstall()
msg = 'Entity "{}" not found in avalon database'.format(
parent['name']
)
self.error(msg)
return {
'success': False,
'message': msg
}
self.db_con.uninstall()
return avalon_entity
def _get_hierarchy(self, asset):
"""
Get hierarchy from Asset entity
:param asset: Asset entity
:type asset: dict
:returns: hierarchy string
:rtype: str
"""
return asset['data']['hierarchy']
def _get_template_data(self, task):
"""
Get data to fill template from task
.. seealso:: :mod:`pypeapp.Anatomy`
:param task: Task entity
:type task: dict
:returns: data for anatomy template
:rtype: dict
"""
project_name = task['project']['full_name']
project_code = task['project']['name']
try:
root = os.environ['PYPE_STUDIO_PROJECTS_PATH']
except KeyError:
msg = 'Project ({}) root not set'.format(project_name)
self.log.error(msg)
return {
'success': False,
'message': msg
}
# fill in template data
asset = self._get_asset(task)
t_data = {
'root': root,
'project': {
'name': project_name,
'code': project_code
},
'asset': asset['name'],
'task': task['name'],
'hierarchy': self._get_hierarchy(asset)
}
return t_data
def launch(self, session, event):
# load shell scripts presets
presets = config.get_presets()['ftrack']["user_assigment_event"]
if not presets:
return
for entity in event.get('data', {}).get('entities', []):
if entity.get('entity_type') != 'Appointment':
continue
task, user = self._get_task_and_user(session,
entity.get('action'),
entity.get('changes'))
if not task or not user:
self.log.error(
'Task or User was not found.')
continue
data = self._get_template_data(task)
# format directories to pass to shell script
anatomy = Anatomy(data["project"]["name"])
# formatting the work dir is the easiest part, as we can use the whole path
work_dir = anatomy.format(data)['avalon']['work']
# we also need the publish dir, but only a part of it
publish = anatomy.format_all(data)['partial']['avalon']['publish']
# now find path to {asset}
m = re.search("(^.+?{})".format(data['asset']),
publish)
if not m:
msg = 'Cannot get part of publish path {}'.format(publish)
self.log.error(msg)
return {
'success': False,
'message': msg
}
publish_dir = m.group(1)
for script in presets.get(entity.get('action'), []):
self.log.info(
'[{}] : running script for user {}'.format(
entity.get('action'), user["username"]))
self._run_script(script, [user["username"],
work_dir, publish_dir])
return True
def register(session, **kw):
"""
Register plugin. Called when used as a plugin.
"""
if not isinstance(session, ftrack_api.session.Session):
return
UserAssigmentEvent(session).register()
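For completeness, a hypothetical hook script matching the argument contract in the docstring above; nothing here ships with pype:

#!/usr/bin/env python
# Receives: username, work directory, publish directory.
import sys

def main(argv):
    username, work_dir, publish_dir = argv[1:4]
    print('{}: work={} publish={}'.format(username, work_dir, publish_dir))

if __name__ == '__main__':
    main(sys.argv)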

View file

@@ -94,6 +94,9 @@ class AppAction(BaseHandler):
):
return False
if entities[0]['parent'].entity_type.lower() == 'project':
return False
ft_project = entities[0]['project']
database = pypelib.get_avalon_database()

View file

@@ -299,21 +299,31 @@ class FtrackEventsThread(QtCore.QThread):
self.signal_timer_stopped.emit()
def ftrack_stop_timer(self):
try:
actual_timer = self.timer_session.query(
'Timer where user_id = "{0}"'.format(self.user['id'])
).first()
if actual_timer is not None:
self.user.stop_timer()
self.timer_session.commit()
self.signal_timer_stopped.emit()
except Exception as e:
log.debug("Timer stop had issues: {}".format(e))
def ftrack_start_timer(self, input_data):
if self.user is None:
return
actual_timer = self.timer_session.query(
'Timer where user_id = "{0}"'.format(self.user['id'])
).first()
if (
actual_timer is not None and
input_data['task_name'] == self.last_task['name'] and
input_data['hierarchy'][-1] == self.last_task['parent']['name']
):
return
input_data['entity_name'] = input_data['hierarchy'][-1]
task_query = (
'Task where name is "{task_name}"'
' and parent.name is "{entity_name}"'

View file

@@ -2308,3 +2308,89 @@ def get_attr_in_layer(attr, layer):
return value
return cmds.getAttr(attr)
def _null(*args):
pass
class shelf():
'''A simple class to build shelves in Maya. Since the build method is empty,
it should be extended by the derived class to build the necessary shelf
elements. By default it creates an empty shelf called "customShelf".'''
###########################################################################
'''This is an example shelf.'''
# class customShelf(shelf):
# def build(self):
# self.addButton(label="button1")
# self.addButton("button2")
# self.addButton("popup")
# p = cmds.popupMenu(b=1)
# self.addMenuItem(p, "popupMenuItem1")
# self.addMenuItem(p, "popupMenuItem2")
# sub = self.addSubMenu(p, "subMenuLevel1")
# self.addMenuItem(sub, "subMenuLevel1Item1")
# sub2 = self.addSubMenu(sub, "subMenuLevel2")
# self.addMenuItem(sub2, "subMenuLevel2Item1")
# self.addMenuItem(sub2, "subMenuLevel2Item2")
# self.addMenuItem(sub, "subMenuLevel1Item2")
# self.addMenuItem(p, "popupMenuItem3")
# self.addButton("button3")
# customShelf()
###########################################################################
def __init__(self, name="customShelf", iconPath="", preset={}):
self.name = name
self.iconPath = iconPath
self.labelBackground = (0, 0, 0, 0)
self.labelColour = (.9, .9, .9)
self.preset = preset
self._cleanOldShelf()
cmds.setParent(self.name)
self.build()
def build(self):
'''This method should be overridden in derived classes to actually
build the shelf elements. Otherwise, nothing is added to the shelf.'''
for item in self.preset['items']:
if not item.get('command'):
# fall back to the module-level no-op
item['command'] = _null
if item['type'] == 'button':
self.addButton(item['name'], command=item['command'])
if item['type'] == 'menuItem':
self.addMenuItem(item['parent'], item['name'], command=item['command'])
if item['type'] == 'subMenu':
self.addSubMenu(item['parent'], item['name'])
def addButton(self, label, icon="commandButton.png", command=_null, doubleCommand=_null):
'''Adds a shelf button with the specified label, command, double click command and image.'''
cmds.setParent(self.name)
if icon:
icon = self.iconPath + icon
cmds.shelfButton(width=37, height=37, image=icon, l=label, command=command, dcc=doubleCommand, imageOverlayLabel=label, olb=self.labelBackground, olc=self.labelColour)
def addMenuItem(self, parent, label, command=_null, icon=""):
'''Adds a menu item with the specified label, command and image to the given parent popup menu.'''
if icon:
icon = self.iconPath + icon
return cmds.menuItem(p=parent, l=label, c=command, i=icon)
def addSubMenu(self, parent, label, icon=None):
'''Adds a sub menu item with the specified label and icon to the specified parent popup menu.'''
if icon:
icon = self.iconPath + icon
return cmds.menuItem(p=parent, l=label, i=icon, subMenu=1)
def _cleanOldShelf(self):
'''Checks if the shelf exists and empties it if it does or creates it if it does not.'''
if cmds.shelfLayout(self.name, ex=1):
if cmds.shelfLayout(self.name, q=1, ca=1):
for each in cmds.shelfLayout(self.name, q=1, ca=1):
cmds.deleteUI(each)
else:
cmds.shelfLayout(self.name, p="ShelfLayout")

View file

@@ -132,9 +132,6 @@ def install():
menu.install()
# load data from templates
api.load_data_from_templates()
# Workfiles.
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
@@ -156,9 +153,6 @@ def uninstall():
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
# reset data from templates
api.reset_data_from_templates()
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""

View file

@@ -7,8 +7,9 @@ import avalon.nuke
import pype.api as pype
import nuke
from .templates import (
get_dataflow,
get_colorspace
get_colorspace_preset,
get_node_dataflow_preset,
get_node_colorspace_preset
)
from pypeapp import Logger
@@ -17,14 +18,6 @@ log = Logger().get_logger(__name__, "nuke")
self = sys.modules[__name__]
self._project = None
for path in sys.path:
log.info(os.path.normpath(path))
if "C:\\Users\\Public" in os.path.normpath(path):
log.info("_ removing from sys.path: `{}`".format(path))
sys.path.remove(path)
def onScriptLoad():
if nuke.env['LINUX']:
nuke.tcl('load ffmpegReader')
@ -129,8 +122,8 @@ def get_render_path(node):
"preset": data['avalon']['families']
}
nuke_dataflow_writes = get_dataflow(**data_preset)
nuke_colorspace_writes = get_colorspace(**data_preset)
nuke_dataflow_writes = get_node_dataflow_preset(**data_preset)
nuke_colorspace_writes = get_node_colorspace_preset(**data_preset)
application = lib.get_application(os.environ["AVALON_APP_NAME"])
data.update({
@ -180,8 +173,8 @@ def script_name():
def create_write_node(name, data):
nuke_dataflow_writes = get_dataflow(**data)
nuke_colorspace_writes = get_colorspace(**data)
nuke_dataflow_writes = get_node_dataflow_preset(**data)
nuke_colorspace_writes = get_node_colorspace_preset(**data)
application = lib.get_application(os.environ["AVALON_APP_NAME"])
try:
@ -319,9 +312,8 @@ def set_writes_colorspace(write_dict):
def set_colorspace():
from pype import api as pype
nuke_colorspace = pype.Colorspace.get("nuke", None)
nuke_colorspace = get_colorspace_preset().get("nuke", None)
try:
set_root_colorspace(nuke_colorspace["root"])
@ -350,8 +342,7 @@ def set_colorspace():
def reset_frame_range_handles():
"""Set frame range to current asset"""
fps = float(api.Session.get("AVALON_FPS", 25))
nuke.root()["fps"].setValue(fps)
root = nuke.root()
name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": name, "type": "asset"})
@ -363,7 +354,7 @@ def reset_frame_range_handles():
data = asset["data"]
missing_cols = []
check_cols = ["fstart", "fend", "handle_start", "handle_end"]
check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"]
for col in check_cols:
if col not in data:
@ -380,20 +371,29 @@ def reset_frame_range_handles():
handles = avalon.nuke.get_handles(asset)
handle_start, handle_end = pype.get_handle_irregular(asset)
log.info("__ handles: `{}`".format(handles))
log.info("__ handle_start: `{}`".format(handle_start))
log.info("__ handle_end: `{}`".format(handle_end))
fps = asset["data"]["fps"]
edit_in = int(asset["data"]["fstart"]) - handle_start
edit_out = int(asset["data"]["fend"]) + handle_end
nuke.root()["first_frame"].setValue(edit_in)
nuke.root()["last_frame"].setValue(edit_out)
root["fps"].setValue(fps)
root["first_frame"].setValue(edit_in)
root["last_frame"].setValue(edit_out)
log.info("__ handles: `{}`".format(handles))
log.info("__ handle_start: `{}`".format(handle_start))
log.info("__ handle_end: `{}`".format(handle_end))
log.info("__ edit_in: `{}`".format(edit_in))
log.info("__ edit_out: `{}`".format(edit_out))
log.info("__ fps: `{}`".format(fps))
# setting active viewers
nuke.frame(int(asset["data"]["fstart"]))
vv = nuke.activeViewer().node()
try:
vv = nuke.activeViewer().node()
except AttributeError:
log.error("No active viewer. Select any node and hit num `1`")
return
range = '{0}-{1}'.format(
int(asset["data"]["fstart"]),
@ -408,6 +408,13 @@ def reset_frame_range_handles():
vv['frame_range'].setValue(range)
vv['frame_range_lock'].setValue(True)
# adding handle_start/end to root avalon knob
if not avalon.nuke.set_avalon_knob_data(root, {
"handle_start": handle_start,
"handle_end": handle_end
}):
log.warning("Cannot set Avalon knob to Root node!")
def get_avalon_knob_data(node):
import toml
@ -560,8 +567,8 @@ def get_hierarchical_attr(entity, attr, default=None):
parent_id = entity['parent']
if (
entity['type'].lower() == 'asset' and
entity.get('data', {}).get('visualParent')
entity['type'].lower() == 'asset'
and entity.get('data', {}).get('visualParent')
):
parent_id = entity['data']['visualParent']
@ -637,8 +644,8 @@ def get_write_node_template_attr(node):
}
# get template data
nuke_dataflow_writes = get_dataflow(**data_preset)
nuke_colorspace_writes = get_colorspace(**data_preset)
nuke_dataflow_writes = get_node_dataflow_preset(**data_preset)
nuke_colorspace_writes = get_node_colorspace_preset(**data_preset)
# collecting correct data
correct_data = OrderedDict({

View file

@ -1,21 +1,33 @@
from pype import api as pype
from pypeapp import Anatomy, config
log = pype.Logger().get_logger(__name__, "nuke")
def get_anatomy(**kwarg):
return pype.Anatomy
return Anatomy()
def get_dataflow(**kwarg):
def get_dataflow_preset():
presets = config.get_init_presets()
return presets["dataflow"]
def get_colorspace_preset():
presets = config.get_init_presets()
return presets["colorspace"]
def get_node_dataflow_preset(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("nuke.templates.get_dataflow():"
"Missing mandatory kwargs `host`, `cls`")
assert any([host, cls]), log.error("nuke.templates.get_node_dataflow_preset(): \
Missing mandatory kwargs `host`, `cls`")
nuke_dataflow = pype.Dataflow.get(str(host), None)
nuke_dataflow = get_dataflow_preset().get(str(host), None)
nuke_dataflow_nodes = nuke_dataflow.get('nodes', None)
nuke_dataflow_node = nuke_dataflow_nodes.get(str(cls), None)
@ -26,15 +38,15 @@ def get_dataflow(**kwarg):
return nuke_dataflow_node
def get_colorspace(**kwarg):
def get_node_colorspace_preset(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("nuke.templates.get_colorspace():"
"Missing mandatory kwargs `host`, `cls`")
assert any([host, cls]), log.error("nuke.templates.get_node_colorspace_preset(): \
Missing mandatory kwargs `host`, `cls`")
nuke_colorspace = pype.Colorspace.get(str(host), None)
nuke_colorspace = get_colorspace_preset().get(str(host), None)
nuke_colorspace_node = nuke_colorspace.get(str(cls), None)
if preset:
nuke_colorspace_node = nuke_colorspace_node.get(str(preset), None)
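A sketch of how the renamed helpers are meant to be called; since `class` is a Python keyword, callers pass it through a kwargs dict (the class and preset names here are hypothetical, actual values come from the studio config).

# Hypothetical usage of the renamed preset helpers.
data_preset = {"host": "nuke", "class": "write", "preset": "render"}
dataflow = get_node_dataflow_preset(**data_preset)
colorspace = get_node_colorspace_preset(**data_preset)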

View file

@ -55,9 +55,6 @@ def install(config):
menu_install()
# load data from templates
api.load_data_from_templates()
# Workfiles.
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
@ -95,9 +92,6 @@ def uninstall():
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
# reset data from templates
api.reset_data_from_templates()
def _register_events():
avalon.on("taskChanged", _update_menu_task_label)

View file

@ -1,4 +1,5 @@
import re
import os
from pypeapp import (
config,
@ -77,19 +78,20 @@ def add_tags_from_presets():
# Get project assets. Currently Ftrack specific to differentiate between
# asset builds and shots.
nks_pres_tags["[AssetBuilds]"] = {}
for asset in io.find({"type": "asset"}):
if asset["data"]["entityType"] == "AssetBuild":
nks_pres_tags["[AssetBuilds]"][asset["name"]] = {
"editable": "1",
"note": "",
"icon": {
"path": "icons:TagActor.png"
},
"metadata": {
"family": "assetbuild"
if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1:
nks_pres_tags["[AssetBuilds]"] = {}
for asset in io.find({"type": "asset"}):
if asset["data"]["entityType"] == "AssetBuild":
nks_pres_tags["[AssetBuilds]"][asset["name"]] = {
"editable": "1",
"note": "",
"icon": {
"path": "icons:TagActor.png"
},
"metadata": {
"family": "assetbuild"
}
}
}
# get project and root bin object
project = hiero.core.projects()[-1]
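The AssetBuild tag scan is now opt-in; assuming the variable is read at Hiero startup as above, enabling it could look like this (sketch only):

# Enable AssetBuild tag collection before Hiero starts (read as int).
os.environ["TAG_ASSETBUILD_STARTUP"] = "1"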

View file

@ -18,6 +18,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
ftrack_log = logging.getLogger('ftrack_api')
ftrack_log.setLevel(logging.WARNING)
ftrack_log = logging.getLogger('ftrack_api_old')
ftrack_log.setLevel(logging.WARNING)
# Collect session
session = ftrack_api.Session()
context.data["ftrackSession"] = session

View file

@ -16,7 +16,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
family_mapping = {'camera': 'cam',
'look': 'look',
'mayaAscii': 'scene',
'mayaascii': 'scene',
'model': 'geo',
'rig': 'rig',
'setdress': 'setdress',
@ -49,14 +49,15 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
for comp in instance.data['representations']:
self.log.debug('component {}'.format(comp))
if comp.get('thumbnail'):
if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])):
location = self.get_ftrack_location(
'ftrack.server', ft_session
)
component_data = {
"name": "thumbnail" # Default component name is "main".
}
elif comp.get('preview'):
comp['thumbnail'] = True
elif comp.get('preview') or ("preview" in comp.get('tags', [])):
'''
Ftrack bug requirement:
- Start frame must be 0
@ -120,7 +121,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
componentList.append(component_item)
# Create copy with ftrack.unmanaged location if thumb or prev
if comp.get('thumbnail') or comp.get('preview'):
if comp.get('thumbnail') or comp.get('preview') \
or ("preview" in comp.get('tags', [])) \
or ("thumbnail" in comp.get('tags', [])):
unmanaged_loc = self.get_ftrack_location(
'ftrack.unmanaged', ft_session
)
@ -148,7 +151,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
componentList.append(component_item_src)
self.log.debug('componentsList: {}'.format(str(componentList)))
instance.data["ftrackComponentsList"] = componentList

View file

@ -0,0 +1,86 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique
class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Extract Quicktime"
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
hosts = ["shell"]
def process(self, instance):
# fps = instance.data.get("fps")
# start = instance.data.get("startFrame")
# stagingdir = os.path.normpath(instance.data.get("stagingDir"))
#
# collected_frames = os.listdir(stagingdir)
# collections, remainder = clique.assemble(collected_frames)
#
# full_input_path = os.path.join(
# stagingdir, collections[0].format('{head}{padding}{tail}')
# )
# self.log.info("input {}".format(full_input_path))
#
# filename = collections[0].format('{head}')
# if not filename.endswith('.'):
# filename += "."
# movFile = filename + "mov"
# full_output_path = os.path.join(stagingdir, movFile)
#
# self.log.info("output {}".format(full_output_path))
#
# config_data = instance.context.data['output_repre_config']
#
# proj_name = os.environ.get('AVALON_PROJECT', '__default__')
# profile = config_data.get(proj_name, config_data['__default__'])
#
# input_args = []
# # overrides output file
# input_args.append("-y")
# # preset's input data
# input_args.extend(profile.get('input', []))
# # necessary input data
# input_args.append("-start_number {}".format(start))
# input_args.append("-i {}".format(full_input_path))
# input_args.append("-framerate {}".format(fps))
#
# output_args = []
# # preset's output data
# output_args.extend(profile.get('output', []))
# # output filename
# output_args.append(full_output_path)
# mov_args = [
# "ffmpeg",
# " ".join(input_args),
# " ".join(output_args)
# ]
# subprocess_mov = " ".join(mov_args)
# sub_proc = subprocess.Popen(subprocess_mov)
# sub_proc.wait()
#
# if not os.path.isfile(full_output_path):
# raise("Quicktime wasn't created succesfully")
#
# if "representations" not in instance.data:
# instance.data["representations"] = []
#
# representation = {
# 'name': 'mov',
# 'ext': 'mov',
# 'files': movFile,
# "stagingDir": stagingdir,
# "preview": True
# }
# instance.data["representations"].append(representation)

View file

@ -6,6 +6,7 @@ from pprint import pformat
import pyblish.api
from avalon import api
import pype.api as pype
def collect(root,
@ -64,7 +65,7 @@ def collect(root,
return collections
class CollectFileSequences(pyblish.api.ContextPlugin):
class CollectRenderedFrames(pyblish.api.ContextPlugin):
"""Gather file sequences from working directory
When "FILESEQUENCE" environment variable is set these paths (folders or
@ -87,7 +88,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
targets = ["filesequence"]
label = "File Sequences"
label = "RenderedFrames"
def process(self, context):
if os.environ.get("PYPE_PUBLISH_PATHS"):
@ -128,6 +129,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
self.log.info("setting session using metadata")
api.Session.update(session)
os.environ.update(session)
else:
# Search in directory
data = dict()
@ -161,6 +163,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
assert isinstance(families, (list, tuple)), "Must be iterable"
assert families, "Must have at least a single family"
families.append("ftrack")
families.append("review")
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Collection: %s" % list(collection))
@ -205,7 +208,8 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
'files': list(collection),
"stagingDir": root,
"anatomy_template": "render",
"frameRate": fps
"frameRate": fps,
"tags": ['review']
}
instance.data["representations"].append(representation)

View file

@ -12,6 +12,5 @@ class CollectTemplates(pyblish.api.ContextPlugin):
label = "Collect Templates"
def process(self, context):
# pype.load_data_from_templates()
context.data['anatomy'] = Anatomy()
self.log.info("Anatomy templates collected...")

View file

@ -0,0 +1,91 @@
import os
import subprocess
import pype.api
import json
import pyblish
class ExtractBurnin(pype.api.Extractor):
"""
Extractor to create video with pre-defined burnins from
existing extracted video representation.
It will work only on representations having `burnin = True` or
`tags` including `burnin`.
"""
label = "Quicktime with burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
optional = True
def process(self, instance):
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
# TODO: expand burnin data list to include all useful keys
version = ''
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
"start_frame": int(instance.data['startFrame']),
"version": version
}
self.log.debug("__ prep_data: {}".format(prep_data))
for i, repre in enumerate(instance.data["representations"]):
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
if "burnin" not in repre.get("tags", []):
continue
stagingdir = repre["stagingDir"]
filename = "{0}".format(repre["files"])
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
full_movie_path = os.path.join(os.path.normpath(stagingdir), repre["files"])
full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin)
self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
burnin_data = {
"input": full_movie_path.replace("\\", "/"),
"output": full_burnin_path.replace("\\", "/"),
"burnin_data": prep_data
}
self.log.debug("__ burnin_data2: {}".format(burnin_data))
json_data = json.dumps(burnin_data)
scriptpath = os.path.normpath(os.path.join(os.environ['PYPE_MODULE_ROOT'],
"pype",
"scripts",
"otio_burnin.py"))
self.log.debug("__ scriptpath: {}".format(scriptpath))
self.log.debug("__ EXE: {}".format(os.getenv("PYPE_PYTHON_EXE")))
try:
p = subprocess.Popen(
[os.getenv("PYPE_PYTHON_EXE"), scriptpath, json_data]
)
p.wait()
if not os.path.isfile(full_burnin_path):
raise RuntimeError("File not existing: {}".format(full_burnin_path))
except Exception as e:
raise RuntimeError("Burnin script didn't work: `{}`".format(e))
if os.path.exists(full_burnin_path):
repre_update = {
"files": movieFileBurnin,
"name": repre["name"]
}
instance.data["representations"][i].update(repre_update)
# removing the source mov file
os.remove(full_movie_path)
self.log.debug("Removed: `{}`".format(full_movie_path))

View file

@ -1,86 +0,0 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique
class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Extract Quicktime"
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
hosts = ["shell"]
def process(self, instance):
fps = instance.data.get("fps")
start = instance.data.get("startFrame")
stagingdir = os.path.normpath(instance.data.get("stagingDir"))
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
full_input_path = os.path.join(
stagingdir, collections[0].format('{head}{padding}{tail}')
)
self.log.info("input {}".format(full_input_path))
filename = collections[0].format('{head}')
if not filename.endswith('.'):
filename += "."
movFile = filename + "mov"
full_output_path = os.path.join(stagingdir, movFile)
self.log.info("output {}".format(full_output_path))
config_data = instance.context.data['output_repre_config']
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
input_args.append("-start_number {}".format(start))
input_args.append("-i {}".format(full_input_path))
input_args.append("-framerate {}".format(fps))
output_args = []
# preset's output data
output_args.extend(profile.get('output', []))
# output filename
output_args.append(full_output_path)
mov_args = [
"ffmpeg",
" ".join(input_args),
" ".join(output_args)
]
subprocess_mov = " ".join(mov_args)
sub_proc = subprocess.Popen(subprocess_mov)
sub_proc.wait()
if not os.path.isfile(full_output_path):
raise("Quicktime wasn't created succesfully")
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'files': movFile,
"stagingDir": stagingdir,
"preview": True
}
instance.data["representations"].append(representation)

View file

@ -0,0 +1,174 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique
from pypeapp import config
class ExtractReview(pyblish.api.InstancePlugin):
"""Extracting Review mov file for Ftrack
A representation is processed only if its tags list contains "review";
otherwise it is ignored.
All new representations are created and encoded by ffmpeg following the
presets found in `pype-config/presets/plugins/global/publish.json` under
`ExtractReview:outputs`. To change the file extension filter, use the
preset attribute `ext_filter`.
"""
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
def process(self, instance):
# adding plugin attributes from presets
publish_presets = config.get_presets()["plugins"]["global"]["publish"]
plugin_attrs = publish_presets[self.__class__.__name__]
output_profiles = plugin_attrs.get("outputs", {})
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("startFrame")
self.log.debug("Families In: `{}`".format(instance.data["families"]))
# get representation and loop them
representations = instance.data["representations"]
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] in plugin_attrs["ext_filter"]:
tags = repre.get("tags", [])
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
staging_dir = repre["stagingDir"]
for name, profile in output_profiles.items():
self.log.debug("Profile name: {}".format(name))
ext = profile.get("ext", None)
if not ext:
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
repre_new = repre.copy()
new_tags = tags[:]
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# add profile tags as families
for t in p_tags:
    if t not in instance.data["families"]:
        instance.data["families"].append(t)
# add profile tags to the representation tags
for t in p_tags:
    if t not in new_tags:
        new_tags.append(t)
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if "mov" not in repre_new['ext']:
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
output_args = []
# preset's output data
output_args.extend(profile.get('output', []))
# letter_box
# TODO: add to documentation
lb = profile.get('letter_box', None)
if lb:
output_args.append(
"-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
# output filename
output_args.append(full_output_path)
mov_args = [
"ffmpeg",
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("{}".format(subprcs_cmd))
sub_proc = subprocess.Popen(subprcs_cmd)
sub_proc.wait()
if not os.path.isfile(full_output_path):
raise RuntimeError(
"Quicktime wasn't created successfully")
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
representations_new.append(repre_new)
# if "delete" in tags:
# if "mov" in full_input_path:
# os.remove(full_input_path)
# self.log.debug("Removed: `{}`".format(full_input_path))
else:
continue
else:
continue
self.log.debug(
"new representations: {}".format(representations_new))
instance.data["representations"] = representations_new
self.log.debug("Families Out: `{}`".format(instance.data["families"]))

View file

@ -4,7 +4,6 @@ import logging
import speedcopy
import clique
import traceback
import sys
import errno
import pyblish.api
from avalon import api, io
@ -59,7 +58,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"render",
"imagesequence",
"review",
"nukescript",
"render",
"rendersetup",
"rig",
@ -101,18 +99,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# \ /
# o __/
#
for result in context.data["results"]:
if not result["success"]:
self.log.debug(result)
exc_type, exc_value, exc_traceback = result["error_info"]
extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
self.log.debug(
"Error at line {}: \"{}\"".format(
extracted_traceback[1], result["error"]
)
)
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# for result in context.data["results"]:
# if not result["success"]:
# self.log.debug(result)
# exc_type, exc_value, exc_traceback = result["error_info"]
# extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
# self.log.debug(
# "Error at line {}: \"{}\"".format(
# extracted_traceback[1], result["error"]
# )
# )
# assert all(result["success"] for result in context.data["results"]), (
# "Atomicity not held, aborting.")
# Assemble
#
@ -227,17 +225,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
@ -259,6 +246,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# | ||
# |_______|
#
# create template data for Anatomy
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
files = repre['files']
if repre.get('stagingDir'):
@ -273,47 +271,52 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = src_collection.format("{tail}")
# fix dst_padding
padd_len = len(files[0].replace(src_head, "").replace(src_tail, ""))
src_padding_exp = "%0{}d".format(padd_len)
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
template_data["frame"] = src_collection.format(
"{padding}") % i
template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(
os.path.normpath(
anatomy_filled[template_name]["path"])
)
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
repre['published_path'] = dst_collection.format()
index_frame_start = None
if repre.get('startFrame'):
frame_start_padding = len(str(
repre.get('endFrame')))
index_frame_start = repre.get('startFrame')
dst_padding_exp = src_padding_exp
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst_padding = src_padding_exp % i
if index_frame_start:
dst_padding = "%0{}d".format(
frame_start_padding) % index_frame_start
dst_padding_exp = "%0{}d".format(frame_start_padding)
dst_padding = dst_padding_exp % index_frame_start
index_frame_start += 1
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
@ -322,6 +325,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
repre['published_path'] = "{0}{1}{2}".format(dst_head, dst_padding_exp, dst_tail)
# for imagesequence version data
hashes = '#' * len(dst_padding)
dst = os.path.normpath("{0}{1}{2}".format(
@ -344,6 +348,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_data["representation"] = repre['ext']
if repre.get("outputName"):
template_data["output"] = repre['outputName']
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = os.path.normpath(
@ -378,7 +385,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"representation": repre['ext']
}
}
self.log.debug("__ _representation: {}".format(representation))
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
self.log.debug("__ destination_list: {}".format(destination_list))
instance.data['destination_list'] = destination_list
@ -541,7 +548,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles", "sourceHashes"
"startFrame", "endFrame", "step", "handles",
"handle_end", "handle_start", "sourceHashes"
]
for key in optionals:
if key in instance.data:

View file

@ -276,7 +276,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"families": ["render"],
"source": source,
"user": context.data["user"],
"version": context.data["version"],
# Optional metadata (for debugging)
"metadata": {
"instance": data,

View file

@ -44,10 +44,7 @@ class PremierePro(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
pype.load_data_from_templates()
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template(
pype.Anatomy)
pype.reset_data_from_templates()
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
env.update(dict(os.environ))

View file

@ -1,52 +0,0 @@
import pype.maya.plugin
import os
from pypeapp import config
class AbcLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the pype.animation family"""
families = ["animation",
"pointcache"]
label = "Reference animation"
representations = ["abc"]
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "animation"
groupName = "{}:{}".format(namespace, name)
cmds.loadPlugin("AbcImport.mll", quiet=True)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -2,55 +2,10 @@ from avalon import api
import pype.maya.plugin
import os
from pypeapp import config
import pymel.core as pm
reload(config)
class ModelLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["model"]
representations = ["ma"]
tool_names = ["loader"]
label = "Reference Model"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
with maya.maintained_selection():
groupName = "{}:{}".format(namespace, name)
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=groupName)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get('model')
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
class GpuCacheLoader(api.Loader):
"""Load model Alembic as gpuCache"""
@ -148,51 +103,3 @@ class GpuCacheLoader(api.Loader):
deleteNamespaceContent=True)
except RuntimeError:
pass
class AbcModelLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the studio.animation family"""
families = ["model"]
representations = ["abc"]
tool_names = ["loader"]
label = "Reference Model"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
groupName = "{}:{}".format(namespace, name)
cmds.loadPlugin("AbcImport.mll", quiet=True)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName=groupName,
reference=True,
returnNewNodes=True)
namespace = cmds.referenceQuery(nodes[0], namespace=True)
groupName = "{}:{}".format(namespace, name)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get('model')
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -0,0 +1,85 @@
from avalon import api
import pype.maya.plugin
import os
from pypeapp import config
import pymel.core as pm
reload(config)
class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""
families = ["model", "pointcache", "animation"]
representations = ["ma", "abc"]
tool_names = ["loader"]
label = "Reference"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "model"
with maya.maintained_selection():
groupName = "{}:{}".format(namespace, name)
cmds.loadPlugin("AbcImport.mll", quiet=True)
nodes = cmds.file(self.fname,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName="{}:{}".format(namespace, name),
reference=True,
returnNewNodes=True)
namespace = cmds.referenceQuery(nodes[0], namespace=True)
groupNode = pm.PyNode(groupName)
roots = set()
print(nodes)
for node in nodes:
try:
roots.add(pm.PyNode(node).getAllParents()[-2])
except Exception:
pass
for root in roots:
root.setParent(world=True)
groupNode.root().zeroTransformPivots()
for root in roots:
root.setParent(groupNode)
presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
colors = presets['plugins']['maya']['load']['colors']
c = colors.get(family)
if c is not None:
groupNode.useOutlinerColor.set(1)
groupNode.outlinerColor.set(c[0], c[1], c[2])
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
# for backwards compatibility
class AbcLoader(ReferenceLoader):
families = ["pointcache", "animation"]
representations = ["abc"]
tool_names = []
# for backwards compatibility
class ModelLoader(ReferenceLoader):
families = ["model", "pointcache"]
representations = ["abc"]
tool_names = []

View file

@ -0,0 +1,32 @@
import pyblish.api
class CollectFtrackFamilies(pyblish.api.InstancePlugin):
"""Collect model data
Ensures always only a single frame is extracted (current frame).
Note:
This is a workaround so that the `pype.model` family can use the
same pointcache extractor implementation as animation and pointcaches.
This always enforces the "current" frame to be published.
"""
order = pyblish.api.CollectorOrder + 0.3
label = 'Add ftrack family'
families = ["model",
"setdress",
"model",
"animation",
"workfile",
"look"
]
def process(self, instance):
# make ftrack publishable
if instance.data.get('families'):
instance.data['families'].append('ftrack')
else:
instance.data['families'] = ['ftrack']

View file

@ -0,0 +1,25 @@
from maya import cmds
import pyblish.api
class CollectMayaAscii(pyblish.api.InstancePlugin):
"""Collect May Ascii Data
"""
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Maya Ascii Data'
families = ["mayaAscii"]
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
# make ftrack publishable
if instance.data.get('families'):
instance.data['families'].append('ftrack')
else:
instance.data['families'] = ['ftrack']

View file

@ -24,9 +24,3 @@ class CollectModelData(pyblish.api.InstancePlugin):
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
# make ftrack publishable
if instance.data.get('families'):
instance.data['families'].append('ftrack')
else:
instance.data['families'] = ['ftrack']

View file

@ -42,7 +42,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
data['representations'] = [{
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': file,
"stagingDir": folder,
}]

View file

@ -82,7 +82,7 @@ class ExtractAnimation(pype.api.Extractor):
representation = {
'name': 'abc',
'ext': '.abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname,
}

View file

@ -42,7 +42,7 @@ class ExtractAssStandin(pype.api.Extractor):
representation = {
'name': 'ass',
'ext': '.ass',
'ext': 'ass',
'files': filename,
"stagingDir": staging_dir
}

View file

@ -68,7 +68,7 @@ class ExtractAssProxy(pype.api.Extractor):
representation = {
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': filename,
"stagingDir": stagingdir
}

View file

@ -75,7 +75,7 @@ class ExtractCameraAlembic(pype.api.Extractor):
representation = {
'name': 'abc',
'ext': '.abc',
'ext': 'abc',
'files': filename,
"stagingDir": dir_path,
}

View file

@ -173,7 +173,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
representation = {
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': filename,
"stagingDir": dir_path,
}

View file

@ -213,7 +213,7 @@ class ExtractFBX(pype.api.Extractor):
representation = {
'name': 'mov',
'ext': '.mov',
'ext': 'mov',
'files': filename,
"stagingDir": stagingDir,
}

View file

@ -56,7 +56,7 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):
representation = {
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': filename,
"stagingDir": dir_path
}

View file

@ -74,7 +74,7 @@ class ExtractModel(pype.api.Extractor):
representation = {
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': filename,
"stagingDir": stagingdir,
}

View file

@ -84,7 +84,7 @@ class ExtractAlembic(pype.api.Extractor):
representation = {
'name': 'abc',
'ext': '.abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname
}

View file

@ -99,7 +99,6 @@ class ExtractQuicktime(pype.api.Extractor):
playblast = capture_gui.lib.capture_scene(preset)
self.log.info("file list {}".format(playblast))
# self.log.info("Calculating HUD data overlay")
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
@ -107,61 +106,19 @@ class ExtractQuicktime(pype.api.Extractor):
stagingdir, collections[0].format('{head}{padding}{tail}'))
self.log.info("input {}".format(input_path))
movieFile = filename + ".mov"
movieFileBurnin = filename + "Burn" + ".mov"
full_movie_path = os.path.join(stagingdir, movieFile)
full_burnin_path = os.path.join(stagingdir, movieFileBurnin)
self.log.info("output {}".format(full_movie_path))
with avalon.maya.suspended_refresh():
try:
(
ffmpeg
.input(input_path, framerate=fps, start_number=int(start))
.output(full_movie_path)
.run(overwrite_output=True,
capture_stdout=True,
capture_stderr=True)
)
except ffmpeg.Error as e:
ffmpeg_error = 'ffmpeg error: {}'.format(e.stderr)
self.log.error(ffmpeg_error)
raise RuntimeError(ffmpeg_error)
version = instance.context.data['version']
burnin_data = {
"input": full_movie_path.replace("\\", "/"),
"output": full_burnin_path.replace("\\", "/"),
"burnin_data": {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
"start_frame": int(instance.data['startFrame']),
"version": "v" + str(version)
}
}
json_data = json.dumps(burnin_data)
scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'], "pype", "scripts", "otio_burnin.py")
p = subprocess.Popen(
['python', scriptpath, json_data]
)
p.wait()
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'files': movieFileBurnin,
'files': collected_frames,
"stagingDir": stagingdir,
'startFrame': start,
'endFrame': end,
'frameRate': fps,
'preview': True
'preview': True,
'tags': ['review']
}
instance.data["representations"].append(representation)

View file

@ -30,7 +30,7 @@ class ExtractRenderSetup(pype.api.Extractor):
representation = {
'name': 'json',
'ext': '.json',
'ext': 'json',
'files': json_filename,
"stagingDir": parent_dir,
}

View file

@ -39,7 +39,7 @@ class ExtractRig(pype.api.Extractor):
representation = {
'name': 'ma',
'ext': '.ma',
'ext': 'ma',
'files': filename,
"stagingDir": dir_path
}

View file

@ -137,7 +137,7 @@ class ExtractThumbnail(pype.api.Extractor):
representation = {
'name': 'thumbnail',
'ext': '.jpg',
'ext': 'jpg',
'files': thumbnail,
"stagingDir": stagingDir,
"thumbnail": True

View file

@ -59,7 +59,7 @@ class ExtractVRayProxy(pype.api.Extractor):
representation = {
'name': 'vrmesh',
'ext': '.vrmesh',
'ext': 'vrmesh',
'files': file_name,
"stagingDir": staging_dir,
}

View file

@ -0,0 +1,95 @@
import pymel.core as pm
import pyblish.api
import pype.api
class ValidateAttributes(pyblish.api.ContextPlugin):
"""Ensure attributes are consistent.
Attributes to validate and their values comes from the
"maya/attributes.json" preset, which needs this structure:
{
"family": {
"node_name.attribute_name": attribute_value
}
}
"""
order = pype.api.ValidateContentsOrder
label = "Attributes"
hosts = ["maya"]
actions = [pype.api.RepairContextAction]
def process(self, context):
# Check for preset existence.
if not context.data["presets"]["maya"].get("attributes"):
return
invalid = self.get_invalid(context, compute=True)
if invalid:
raise RuntimeError(
"Found attributes with invalid values: {}".format(invalid)
)
@classmethod
def get_invalid(cls, context, compute=False):
invalid = context.data.get("invalid_attributes", [])
if compute:
invalid = cls.get_invalid_attributes(context)
return invalid
@classmethod
def get_invalid_attributes(cls, context):
presets = context.data["presets"]["maya"]["attributes"]
invalid_attributes = []
for instance in context:
# Filter publishable instances.
if not instance.data["publish"]:
continue
# Filter families.
families = [instance.data["family"]]
families += instance.data.get("families", [])
families = list(set(families) & set(presets.keys()))
if not families:
continue
# Get all attributes to validate.
attributes = {}
for family in families:
for preset in presets[family]:
[node_name, attribute_name] = preset.split(".")
attributes.update(
{node_name: {attribute_name: presets[family][preset]}}
)
# Get invalid attributes.
nodes = [pm.PyNode(x) for x in instance]
for node in nodes:
name = node.name(stripNamespace=True)
if name not in attributes.keys():
continue
presets_to_validate = attributes[name]
for attribute in node.listAttr():
if attribute.attrName() in presets_to_validate:
expected = presets_to_validate[attribute.attrName()]
if attribute.get() != expected:
invalid_attributes.append(
{
"attribute": attribute,
"expected": expected,
"current": attribute.get()
}
)
context.data["invalid_attributes"] = invalid_attributes
return invalid_attributes
@classmethod
def repair(cls, instance):
invalid = cls.get_invalid(instance)
for data in invalid:
data["attribute"].set(data["expected"])

View file

@ -35,7 +35,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
DEFAULT_PADDING = 4
RENDERER_PREFIX = {"vray": "<Scene>/<Layer>/<Layer>"}
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>"
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):

View file

@ -0,0 +1,46 @@
import pyblish.api
@pyblish.api.log
class CollectRenderTarget(pyblish.api.InstancePlugin):
"""Collect families for all instances"""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Render Target"
hosts = ["nuke", "nukeassist"]
families = ['write']
def process(self, instance):
node = instance[0]
self.log.info('processing {}'.format(node))
families = []
if instance.data.get('families'):
families += instance.data['families']
# set for ftrack to accept
# instance.data["families"] = ["ftrack"]
if node["render"].value():
# dealing with local/farm rendering
if node["render_farm"].value():
families.append("render.farm")
else:
families.append("render.local")
else:
families.append("render.frames")
# to ignore staging dir op in integrate
instance.data['transfer'] = False
families.append('ftrack')
instance.data["families"] = families
# Sort/grouped by family (preserving local index)
instance.context[:] = sorted(instance.context, key=self.sort_by_family)
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))
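The knob-to-family mapping above reduces to a small decision; a sketch of the equivalent logic for reference (the function name is illustrative):

def target_family(render, render_farm):
    # Illustrative reduction of the render/render_farm knob logic.
    if not render:
        return "render.frames"
    return "render.farm" if render_farm else "render.local"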

View file

@ -12,7 +12,7 @@ class ExtractScript(pype.api.Extractor):
order = pyblish.api.ExtractorOrder - 0.05
optional = True
hosts = ['nuke']
families = ["nukescript"]
families = ["workfile"]
def process(self, instance):
self.log.debug("instance extracting: {}".format(instance.data))
@ -27,7 +27,7 @@ class ExtractScript(pype.api.Extractor):
shutil.copy(current_script, path)
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"] = list()
representation = {
'name': 'nk',

View file

@ -39,12 +39,14 @@ class LinkAsGroup(api.Loader):
precomp_name = context["representation"]["context"]["subset"]
self.log.info("versionData: {}\n".format(context["version"]["data"]))
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
self.log.info("start: {}\n".format(start))
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handles",
"source", "author", "fps"]
add_keys = ["startFrame", "endFrame", "handle_start", "handle_end", "source", "author", "fps"]
data_imprint = {
"start_frame": start,

View file

@ -0,0 +1,14 @@
import pyblish.api
import nuke
class CollectActiveViewer(pyblish.api.ContextPlugin):
"""Collect any active viewer from nodes
"""
order = pyblish.api.CollectorOrder + 0.3
label = "Collect Active Viewer"
hosts = ["nuke"]
def process(self, context):
context.data["ViewerProcess"] = nuke.ViewerProcess.node()

View file

@ -1,10 +1,11 @@
import pyblish.api
class SelectCurrentFile(pyblish.api.ContextPlugin):
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Collect Current File"
hosts = ["nuke"]
def process(self, context):

View file

@ -64,11 +64,11 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
"name": node.name(),
"subset": subset,
"family": avalon_knob_data["family"],
"families": [family],
"families": [avalon_knob_data["family"], family],
"avalonKnob": avalon_knob_data,
"publish": node.knob('publish').value(),
"step": 1,
"fps": int(nuke.root()['fps'].value())
"fps": nuke.root()['fps'].value()
})

View file

@ -1,35 +1,46 @@
from avalon import api, io
import nuke
import pyblish.api
import os
from avalon.nuke.lib import (
add_publish_knob,
add_avalon_tab_knob
import pype.api as pype
from avalon.nuke import (
get_avalon_knob_data,
add_publish_knob
)
class CollectScript(pyblish.api.ContextPlugin):
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Publish current script version."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Script to publish"
label = "Collect Workfile"
hosts = ['nuke']
def process(self, context):
root = nuke.root()
add_avalon_tab_knob(root)
knob_data = get_avalon_knob_data(root)
add_publish_knob(root)
family = "nukescript"
family = "workfile"
# creating instances per write node
file_path = root['name'].value()
file_path = context.data["currentFile"]
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family)
# get version string
version = pype.get_version_from_path(base_name)
# Get frame range
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
handle_start = int(knob_data.get("handle_start", 0))
handle_end = int(knob_data.get("handle_end", 0))
# Get format
format = root['format'].value()
resolution_width = format.width()
@ -40,22 +51,47 @@ class CollectScript(pyblish.api.ContextPlugin):
instance = context.create_instance(subset)
instance.add(root)
instance.data.update({
"subset": subset,
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"label": base_name,
"name": base_name,
"startFrame": first_frame,
"endFrame": last_frame,
"version": version,
"startFrame": first_frame + handle_start,
"endFrame": last_frame - handle_end,
"resolution_width": resolution_width,
"resolution_height": resolution_height,
"pixel_aspect": pixel_aspect,
# backward compatibility
"handles": handle_start,
"handle_start": handle_start,
"handle_end": handle_end,
"step": 1,
"fps": root['fps'].value(),
}
context.data.update(script_data)
# creating instance data
instance.data.update({
"subset": subset,
"label": base_name,
"name": base_name,
"publish": root.knob('publish').value(),
"family": family,
"representation": "nk",
"handles": context.data['handles'],
"step": 1,
"fps": int(root['fps'].value()),
"representations": list()
})
# adding basic script data
instance.data.update(script_data)
# creating representation
representation = {
'name': 'nk',
'ext': 'nk',
'files': base_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info('Publishing script version')
context.data["instances"].append(instance)

View file

@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Writes"
hosts = ["nuke", "nukeassist"]
families = ["render.local", "render", "render.farm"]
families = ["render", "render.local", "render.farm"]
def process(self, instance):
@ -66,19 +66,20 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
instance.data['families'].append('ftrack')
if "representations" not in instance.data:
instance.data["representations"] = list()
try:
collected_frames = os.listdir(output_dir)
representation = {
'name': ext,
'ext': ext,
'files': collected_frames,
"stagingDir": output_dir,
"anatomy_template": "render"
}
instance.data["representations"].append(representation)
try:
collected_frames = os.listdir(output_dir)
representation['files'] = collected_frames
instance.data["representations"].append(representation)
except Exception:
instance.data["representations"].append(representation)
self.log.debug("couldn't collect frames: {}".format(label))
if 'render.local' in instance.data['families']:
@ -96,5 +97,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"colorspace": node["colorspace"].value(),
})
self.log.debug("instance.data: {}".format(instance.data))

View file

@ -21,7 +21,6 @@ class NukeRenderLocal(pype.api.Extractor):
def process(self, instance):
node = instance[0]
context = instance.context
self.log.debug("instance collected: {}".format(instance.data))
@ -29,12 +28,6 @@ class NukeRenderLocal(pype.api.Extractor):
last_frame = instance.data.get("endFrame", None)
node_subset_name = instance.data.get("name", None)
# swap path to stageDir
temp_dir = self.staging_dir(instance).replace("\\", "/")
output_dir = instance.data.get("outputDir")
path = node['file'].value()
node['file'].setValue(path.replace(output_dir, temp_dir))
self.log.info("Starting render")
self.log.info("Start frame: {}".format(first_frame))
self.log.info("End frame: {}".format(last_frame))
@ -46,27 +39,26 @@ class NukeRenderLocal(pype.api.Extractor):
int(last_frame)
)
# swap path back to publish path
path = node['file'].value()
node['file'].setValue(path.replace(temp_dir, output_dir))
out_dir = os.path.dirname(path)
ext = node["file_type"].value()
if "representations" not in instance.data:
instance.data["representations"] = []
collected_frames = os.listdir(temp_dir)
collected_frames = os.listdir(out_dir)
repre = {
'name': ext,
'ext': ext,
'files': collected_frames,
"stagingDir": temp_dir,
"stagingDir": out_dir,
"anatomy_template": "render"
}
instance.data["representations"].append(repre)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name,
temp_dir
out_dir
))
instance.data['family'] = 'render'

View file

@ -2,10 +2,9 @@ import os
import nuke
import pyblish.api
import pype
from pype.vendor import ffmpeg
class ExtractDataForReview(pype.api.Extractor):
class ExtractReviewData(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
@ -13,8 +12,7 @@ class ExtractDataForReview(pype.api.Extractor):
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review"
optional = True
label = "Extract Review Data"
families = ["review"]
hosts = ["nuke"]
@ -35,63 +33,15 @@ class ExtractDataForReview(pype.api.Extractor):
if "still" not in instance.data["families"]:
self.render_review_representation(instance,
representation="mov")
self.log.debug("review mov:")
self.transcode_mov(instance)
self.log.debug("instance.data: {}".format(instance.data))
self.render_review_representation(instance,
representation="jpeg")
else:
self.log.debug("instance: {}".format(instance))
self.render_review_representation(instance, representation="jpeg")
# Restore selection
[i["selected"].setValue(False) for i in nuke.allNodes()]
[i["selected"].setValue(True) for i in selection]
def transcode_mov(self, instance):
collection = instance.data["collection"]
stagingDir = instance.data["stagingDir"].replace("\\", "/")
file_name = collection.format("{head}mov")
review_mov = os.path.join(stagingDir, file_name).replace("\\", "/")
self.log.info("transcoding review mov: {0}".format(review_mov))
if instance.data.get("baked_colorspace_movie"):
input_movie = instance.data["baked_colorspace_movie"]
out, err = (
ffmpeg
.input(input_movie)
.output(
review_mov,
pix_fmt='yuv420p',
crf=18,
timecode="00:00:00:01"
)
.overwrite_output()
.run()
)
self.log.debug("Removing `{0}`...".format(
instance.data["baked_colorspace_movie"]))
os.remove(instance.data["baked_colorspace_movie"])
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'review',
'ext': 'mov',
'files': file_name,
"stagingDir": stagingDir,
"anatomy_template": "render",
"thumbnail": False,
"preview": True,
'startFrameReview': instance.data['startFrame'],
'endFrameReview': instance.data['endFrame'],
'frameRate': instance.context.data["framerate"]
}
instance.data["representations"].append(representation)
def render_review_representation(self,
instance,
representation="mov"):
@ -132,15 +82,20 @@ class ExtractDataForReview(pype.api.Extractor):
temporary_nodes.append(node)
reformat_node = nuke.createNode("Reformat")
reformat_node["format"].setValue("HD_1080")
reformat_node["resize"].setValue("fit")
reformat_node["filter"].setValue("Lanczos6")
reformat_node["black_outside"].setValue(True)
ref_node = self.nodes.get("Reformat", None)
if ref_node:
for k, v in ref_node:
self.log.debug("k,v: {0}:{1}".format(k,v))
if isinstance(v, unicode):
v = str(v)
reformat_node[k].setValue(v)
reformat_node.setInput(0, previous_node)
previous_node = reformat_node
temporary_nodes.append(reformat_node)
viewer_process_node = nuke.ViewerProcess.node()
viewer_process_node = instance.context.data.get("ViewerProcess")
dag_node = None
if viewer_process_node:
dag_node = nuke.createNode(viewer_process_node.Class())
@ -162,6 +117,7 @@ class ExtractDataForReview(pype.api.Extractor):
if representation in "mov":
file = fhead + "baked.mov"
name = "baked"
path = os.path.join(stagingDir, file).replace("\\", "/")
self.log.debug("Path: {}".format(path))
instance.data["baked_colorspace_movie"] = path
@ -170,11 +126,11 @@ class ExtractDataForReview(pype.api.Extractor):
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
thumbnail = False
preview = True
tags = ["review", "delete"]
elif representation in "jpeg":
file = fhead + "jpeg"
name = "thumbnail"
path = os.path.join(stagingDir, file).replace("\\", "/")
instance.data["thumbnail"] = path
write_node["file"].setValue(path)
@ -182,31 +138,29 @@ class ExtractDataForReview(pype.api.Extractor):
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
thumbnail = True
preview = False
tags = ["thumbnail"]
# retime to a single middle frame for the still
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
# add into files for integration as representation
if "representations" not in instance.data:
instance.data["representations"] = []
repre = {
'name': representation,
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"anatomy_template": "render",
"thumbnail": thumbnail,
"preview": preview
}
instance.data["representations"].append(repre)
repre = {
'name': name,
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"startFrame": first_frame,
"endFrame": last_frame,
"anatomy_template": "render",
"tags": tags
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug("representations: {}".format(instance.data["representations"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)

View file

@ -0,0 +1,18 @@
import pyblish.api
import nuke
class ValidateActiveViewer(pyblish.api.ContextPlugin):
"""Validate presentse of the active viewer from nodes
"""
order = pyblish.api.ValidatorOrder
label = "Validate Active Viewer"
hosts = ["nuke"]
def process(self, context):
viewer_process_node = context.data.get("ViewerProcess")
assert viewer_process_node, (
"Missing active viewer process! Please click on output write node and push key number 1-9"
)
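
A hypothetical collector counterpart (not part of this commit), sketching how "ViewerProcess" might land in the context data this validator reads:

import pyblish.api
import nuke


class CollectActiveViewer(pyblish.api.ContextPlugin):
    """Store the active viewer process node on the context."""

    order = pyblish.api.CollectorOrder
    hosts = ["nuke"]

    def process(self, context):
        # nuke.ViewerProcess.node() returns the active viewer process node
        context.data["ViewerProcess"] = nuke.ViewerProcess.node()
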

View file

@ -11,9 +11,12 @@ class RepairCollectionAction(pyblish.api.Action):
icon = "wrench"
def process(self, context, plugin):
self.log.info(context[0])
files_remove = [os.path.join(context[0].data["outputDir"], f)
for f in context[0].data["files"]]
for r in context[0].data.get("representations", [])
for f in r.get("files", [])
]
self.log.info(files_remove)
for f in files_remove:
os.remove(f)
self.log.debug("removing file: {}".format(f))
@ -38,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
if not repre.get('files'):
msg = ("no frames were collected, "
"you need to render them")
self.log.error(msg)
self.log.warning(msg)
raise ValidationException(msg)
collections, remainder = clique.assemble(repre["files"])
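
For context, clique.assemble splits a flat file list into frame collections plus leftovers; a quick sketch with hypothetical file names:

import clique

files = ["render.0001.exr", "render.0002.exr", "render.0003.exr", "notes.txt"]
collections, remainder = clique.assemble(files)
# collections -> [<Collection "render.%04d.exr [1-3]">]
# remainder   -> ['notes.txt']
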

View file

@ -7,13 +7,13 @@ class ValidateScript(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder + 0.1
families = ["nukescript"]
families = ["workfile"]
label = "Check script settings"
hosts = ["nuke"]
def process(self, instance):
instance_data = instance.data
asset_name = instance_data["asset"]
ctx_data = instance.context.data
asset_name = ctx_data["asset"]
asset = io.find_one({
"type": "asset",
@ -24,11 +24,11 @@ class ValidateScript(pyblish.api.InstancePlugin):
# These attributes will be checked
attributes = [
"fps", "fstart", "fend",
"resolution_width", "resolution_height", "pixel_aspect", "handles"
"resolution_width", "resolution_height", "handle_start", "handle_end"
]
# Value of these attributes can be found on parents
hierarchical_attributes = ["fps", "resolution_width", "resolution_height", "pixel_aspect", "handles"]
hierarchical_attributes = ["fps", "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end"]
missing_attributes = []
asset_attributes = {}
@ -58,23 +58,27 @@ class ValidateScript(pyblish.api.InstancePlugin):
raise ValueError(message)
# Get handles from database, Default is 0 (if not found)
handles = 0
if "handles" in asset_attributes:
handles = asset_attributes["handles"]
handle_start = 0
handle_end = 0
if "handle_start" in asset_attributes:
handle_start = asset_attributes["handle_start"]
if "handle_end" in asset_attributes:
handle_end = asset_attributes["handle_end"]
# Set frame range with handles
asset_attributes["fstart"] -= handles
asset_attributes["fend"] += handles
# asset_attributes["fstart"] -= handle_start
# asset_attributes["fend"] += handle_end
# Get values from nukescript
script_attributes = {
"handles": handles,
"fps": instance_data["fps"],
"fstart": instance_data["startFrame"],
"fend": instance_data["endFrame"],
"resolution_width": instance_data["resolution_width"],
"resolution_height": instance_data["resolution_height"],
"pixel_aspect": instance_data["pixel_aspect"]
"handle_start": ctx_data["handle_start"],
"handle_end": ctx_data["handle_end"],
"fps": ctx_data["fps"],
"fstart": ctx_data["startFrame"],
"fend": ctx_data["endFrame"],
"resolution_width": ctx_data["resolution_width"],
"resolution_height": ctx_data["resolution_height"],
"pixel_aspect": ctx_data["pixel_aspect"]
}
# Compare asset's values Nukescript X Database
@ -87,14 +91,14 @@ class ValidateScript(pyblish.api.InstancePlugin):
# Raise error if not matching
if len(not_matching) > 0:
msg = "Attributes '{}' aro not set correctly"
msg = "Attributes '{}' are not set correctly"
# Alert user that handles are set if Frame start/end not match
if (
(("fstart" in not_matching) or ("fend" in not_matching)) and
(handles > 0)
((handle_start > 0) or (handle_end > 0))
):
handles = str(handles).replace(".0", "")
msg += " (handles are set to {})".format(handles)
msg += " (`handle_start` are set to {})".format(handle_start)
msg += " (`handle_end` are set to {})".format(handle_end)
message = msg.format(", ".join(not_matching))
raise ValueError(message)
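
A minimal sketch of the asset-versus-script comparison this validator performs (values are hypothetical):

asset_attributes = {"fps": 25.0, "resolution_width": 1920, "handle_start": 10}
script_attributes = {"fps": 24.0, "resolution_width": 1920, "handle_start": 10}

not_matching = [
    attr for attr in asset_attributes
    if script_attributes.get(attr) != asset_attributes[attr]
]
# -> ['fps'], so the message reads "Attributes 'fps' are not set correctly"
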

View file

@ -1,5 +1,4 @@
import pyblish.api
import pype.api as pype
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
@ -8,10 +7,7 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):
def process(self, context):
"""Todo, inject the current working file"""
project = context.data('activeProject')
context.data["currentFile"] = path = project.path()
context.data["version"] = pype.get_version_from_path(path)
self.log.info("currentFile: {}".format(context.data["currentFile"]))
self.log.info("version: {}".format(context.data["version"]))

View file

@ -32,16 +32,21 @@ class CollectClipHandles(api.ContextPlugin):
if instance.data.get("main"):
name = instance.data["asset"]
if assets_shared.get(name):
self.log.debug("Adding to shared assets: `{}`".format(
instance.data["name"]))
assets_shared[name].update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end
})
asset_shared = assets_shared.get(name)
else:
asset_shared = assets_shared[name]
self.log.debug("Adding to shared assets: `{}`".format(
instance.data["name"]))
asset_shared.update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end
})
for instance in filtered_instances:
if not instance.data.get("main") or not instance.data.get("handleTag"):
if not instance.data.get("main") and not instance.data.get("handleTag"):
self.log.debug("Synchronize handles on: `{}`".format(
instance.data["name"]))
name = instance.data["asset"]

View file

@ -34,6 +34,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def process(self, context):
for instance in context[:]:
assets_shared = context.data.get("assetsShared")
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
@ -139,19 +140,33 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"Clip: `{}`".format(asset)
)
assetsShared = {
asset: {
"asset": instance.data["asset"],
"hierarchy": hierarchy,
"parents": parents,
"tasks": instance.data['tasks']
}}
self.log.debug("__ assetsShared: {}".format(assetsShared))
# add formatted hierarchy path into instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
context.data["assetsShared"].update(
assetsShared)
# adding to asset shared dict
self.log.debug("__ assets_shared: {}".format(assets_shared))
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
instance.data["name"]))
asset_shared = assets_shared.get(asset)
else:
asset_shared = assets_shared[asset]
asset_shared.update({
"asset": instance.data["asset"],
"hierarchy": hierarchy,
"parents": parents,
"tasks": instance.data["tasks"]
})
# adding frame start if any on instance
start_frame = instance.data.get("frameStart")
if start_frame:
asset_shared.update({
"frameStart": start_frame
})
class CollectHierarchyContext(pyblish.api.ContextPlugin):
@ -176,6 +191,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
def process(self, context):
instances = context[:]
sequence = context.data['activeSequence']
# create hierarchyContext attr if context has none
temp_context = {}
@ -201,6 +217,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
# adding frame start if any on instance
start_frame = s_asset_data.get("frameStart")
if start_frame:
instance.data["frameStart"] = start_frame
self.log.debug(
"__ instance.data[parents]: {}".format(
instance.data["parents"]
@ -226,8 +248,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# get custom attributes of the shot
if instance.data.get("main"):
start_frame = instance.data.get("frameStart", 0)
in_info['custom_attributes'] = {
'handles': int(instance.data.get('handles')),
'handle_start': handle_start,
@ -238,27 +258,30 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
"edit_in": int(instance.data["startFrame"]),
"edit_out": int(instance.data["endFrame"])
}
if start_frame is not 0:
in_info['custom_attributes'].update({
'fstart': start_frame,
'fend': start_frame + (
instance.data["endFrame"] - instance.data["startFrame"])
})
# adding SourceResolution if Tag was present
s_res = instance.data.get("sourceResolution")
if s_res and instance.data.get("main"):
item = instance.data["item"]
self.log.debug("TrackItem: `{0}`".format(
item))
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
self.log.info("Source Width and Height are: `{0} x {1}`".format(
width, height))
if instance.data.get("main"):
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
width, height, pixel_aspect))
in_info['custom_attributes'].update({
"resolution_width": width,
"resolution_height": height
"resolution_height": height,
"pixel_aspect": pixel_aspect
})
start_frame = instance.data.get("frameStart")
if start_frame:
in_info['custom_attributes'].update({
'fstart': start_frame,
'fend': start_frame + (
instance.data["endFrame"] -
instance.data["startFrame"])
})
in_info['tasks'] = instance.data['tasks']
parents = instance.data.get('parents', [])
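
A worked example of the retimed range computed above, for a clip occupying timeline frames 1001-1050 whose frameStart tag asks for 101 (numbers are hypothetical):

start_frame = 101                           # from the frameStart tag
edit_in, edit_out = 1001, 1050              # instance startFrame / endFrame
fstart = start_frame                        # 101
fend = start_frame + (edit_out - edit_in)   # 150: same duration, new origin
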

View file

@ -25,50 +25,55 @@ class CollectShots(api.ContextPlugin):
)
continue
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
if instance.data.get("main"):
# Collect data.
data = {}
for key, value in instance.data.iteritems():
if key in "main":
continue
data[key] = value
data["family"] = "shot"
data["families"] = []
data["frameStart"] = instance.data.get("frameStart", 1)
data["family"] = "shot"
data["families"] = []
data["frameStart"] = instance.data.get("frameStart", 1)
data["subset"] = data["family"] + "Main"
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)
data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
)
# Get handles.
data["handleStart"] = instance.data["handleStart"]
data["handleEnd"] = instance.data["handleEnd"]
# Get handles.
data["handleStart"] = instance.data["handleStart"]
data["handleEnd"] = instance.data["handleEnd"]
# Frame-ranges with handles.
data["sourceInH"] = data["sourceIn"] - data["handleStart"]
data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]
# Frame-ranges with handles.
data["sourceInH"] = data["sourceIn"] - data["handleStart"]
data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]
# Get timeline frames.
data["timelineIn"] = int(data["item"].timelineIn())
data["timelineOut"] = int(data["item"].timelineOut())
# Get timeline frames.
data["timelineIn"] = int(data["item"].timelineIn())
data["timelineOut"] = int(data["item"].timelineOut())
# Frame-ranges with handles.
data["timelineInHandles"] = data["timelineIn"]
data["timelineInHandles"] -= data["handleStart"]
data["timelineOutHandles"] = data["timelineOut"]
data["timelineOutHandles"] += data["handleEnd"]
# Frame-ranges with handles.
data["timelineInHandles"] = data["timelineIn"]
data["timelineInHandles"] -= data["handleStart"]
data["timelineOutHandles"] = data["timelineOut"]
data["timelineOutHandles"] += data["handleEnd"]
# Creating comp frame range.
data["endFrame"] = (
data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
)
# Creating comp frame range.
data["endFrame"] = (
data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
)
# Get fps.
sequence = instance.context.data["activeSequence"]
data["fps"] = sequence.framerate()
# Get fps.
sequence = instance.context.data["activeSequence"]
data["fps"] = sequence.framerate()
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)
self.log.debug("_ context: {}".format(context[:]))

View file

@ -20,4 +20,5 @@ class CollectClipTagFrameStart(api.InstancePlugin):
# gets only task family tags and collect labels
if "frameStart" in t_family:
t_number = t_metadata.get("tag.number", "")
instance.data["frameStart"] = int(t_number)
start_frame = int(t_number)
instance.data["frameStart"] = start_frame

View file

@ -5,7 +5,7 @@ class CollectClipTagTypes(api.InstancePlugin):
"""Collect Types from Tags of selected track items."""
order = api.CollectorOrder + 0.012
label = "Collect Plate Type from Tag"
label = "Collect main flag"
hosts = ["nukestudio"]
families = ['clip']
@ -25,7 +25,8 @@ class CollectClipTagTypes(api.InstancePlugin):
t_subset.capitalize())
if "plateMain" in subset_name:
instance.data["main"] = True
if not instance.data.get("main"):
instance.data["main"] = True
self.log.info("`plateMain` found in instance.name: `{}`".format(
instance.data["name"]))
return

View file

@ -0,0 +1,15 @@
import pyblish.api
import pype.api as pype
class CollectWorkfileVersion(pyblish.api.ContextPlugin):
"""Inject the current working file version into context"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect workfile version"
def process(self, context):
project = context.data('activeProject')
path = project.path()
context.data["version"] = pype.get_version_from_path(path)
self.log.info("version: {}".format(context.data["version"]))

View file

@ -0,0 +1,74 @@
import pyblish
from avalon import io
from pype.action import get_errored_instances_from_context
import pype.api as pype
@pyblish.api.log
class RepairNukestudioVersionUp(pyblish.api.Action):
label = "Version Up Workfile"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
if instances:
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
if project:
project.saveAs(new_path)
self.log.info("Project workfile version was fixed")
class ValidateVersion(pyblish.api.InstancePlugin):
"""Validate clip's versions.
"""
order = pyblish.api.ValidatorOrder
families = ["plate"]
label = "Validate Version"
actions = [RepairNukestudioVersionUp]
hosts = ["nukestudio"]
def process(self, instance):
version = int(instance.data.get("version", 0))
asset_name = instance.data.get("asset", None)
subset_name = instance.data.get("subset", None)
assert version, "The file name is missing a version string! Example: filename_v001.hrox"
self.log.debug("Collected version: `{0}`".format(version))
found_v = 0
try:
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": subset_name})
version_db = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': version
}) or {}
found_v = version_db.get("name", 0)
self.log.debug("Found version: `{0}`".format(found_v))
except Exception as e:
self.log.debug("Problem to get data from database for asset `{0}` subset `{1}`. Error: `{2}`".format(asset_name, subset_name, e))
assert (found_v != version), "Version must not match an existing version in the database! Asset: `{0}`, file version: `{1}`, db version: `{2}`".format(asset_name, version, found_v)

View file

@ -0,0 +1,23 @@
from pyblish import api
import pype.api as pype
class VersionUpWorkfile(api.ContextPlugin):
"""Save as new workfile version"""
order = api.IntegratorOrder + 10.1
label = "Version-up Workfile"
hosts = ["nukestudio"]
optional = True
active = True
def process(self, context):
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
if project:
project.saveAs(new_path)
self.log.info("Project workfile was versioned up")

View file

@ -96,9 +96,6 @@ def install():
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
# load data from templates
api.load_data_from_templates()
# synchronize extensions
extensions_sync()
message(title="pyblish_paths", message=str(reg_paths), level="info")
@ -109,6 +106,3 @@ def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
# reset data from templates
api.reset_data_from_templates()

View file

@ -1,10 +1,12 @@
from pype import api as pype
from pypeapp import Anatomy, config
log = pype.Logger().get_logger(__name__, "premiere")
def get_anatomy(**kwarg):
return pype.Anatomy
return Anatomy()
def get_dataflow(**kwarg):
@ -15,7 +17,8 @@ def get_dataflow(**kwarg):
assert any([host, cls]), log.error("premiera.templates.get_dataflow():"
"Missing mandatory kwargs `host`, `cls`")
pr_dataflow = getattr(pype.Dataflow, str(host), None)
presets = config.get_init_presets()
pr_dataflow = getattr(presets["dataflow"], str(host), None)
pr_dataflow_node = getattr(pr_dataflow.nodes, str(cls), None)
if preset:
pr_dataflow_node = getattr(pr_dataflow_node, str(preset), None)
@ -32,7 +35,8 @@ def get_colorspace(**kwarg):
assert any([host, cls]), log.error("premiera.templates.get_colorspace():"
"Missing mandatory kwargs `host`, `cls`")
pr_colorspace = getattr(pype.Colorspace, str(host), None)
presets = config.get_init_presets()
pr_colorspace = getattr(presets["colorspace"], str(host), None)
pr_colorspace_node = getattr(pr_colorspace, str(cls), None)
if preset:
pr_colorspace_node = getattr(pr_colorspace_node, str(preset), None)

View file

@ -37,8 +37,8 @@ class TimersManager(metaclass=Singleton):
def set_signal_times(self):
try:
timer_info = get_presets()['services']['timers_manager']['timer']
full_time = int(timer_info['full_time'])*60
message_time = int(timer_info['message_time'])*60
full_time = int(float(timer_info['full_time'])*60)
message_time = int(float(timer_info['message_time'])*60)
self.time_show_message = full_time - message_time
self.time_stop_timer = full_time
return True
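
A worked example of the fix above: presets may store minutes as strings like "7.5", so the cast must go through float before scaling to seconds:

timer_info = {"full_time": "7.5", "message_time": "0.5"}    # hypothetical preset
full_time = int(float(timer_info["full_time"]) * 60)        # 450 seconds
message_time = int(float(timer_info["message_time"]) * 60)  # 30 seconds
time_show_message = full_time - message_time                # warn at 420 s
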
@ -78,7 +78,15 @@ class TimersManager(metaclass=Singleton):
'task_name': 'Lookdev BG'
}
'''
if len(data['hierarchy']) < 1:
self.log.error((
'Action not allowed in Pype!'
' The timer was launched on a task that is a direct child of the project.'
))
return
self.last_task = data
for module in self.modules:
module.start_timer_manager(data)
self.is_running = True

View file

@ -4,7 +4,7 @@ import sys
from avalon import io, api as avalon, lib as avalonlib
from . import lib
# from pypeapp.api import (Templates, Logger, format)
from pypeapp import Logger, config, Anatomy
from pypeapp import Logger, Anatomy
log = Logger().get_logger(__name__, os.getenv("AVALON_APP", "pype-config"))
@ -17,63 +17,6 @@ def set_session():
self.SESSION = avalon.session
def load_data_from_templates():
"""
Load Presets and Anatomy `contextual` data as singleton object
[info](https://en.wikipedia.org/wiki/Singleton_pattern)
Returns:
singleton: adding data to sharable object variable
"""
from . import api
if not any([
api.Dataflow,
api.Anatomy,
api.Colorspace
]
):
presets = config.get_presets()
anatomy = Anatomy()
try:
# try if it is not in projects custom directory
# `{PYPE_PROJECT_CONFIGS}/[PROJECT_NAME]/init.json`
# init.json define preset names to be used
p_init = presets["init"]
colorspace = presets["colorspace"][p_init["colorspace"]]
dataflow = presets["dataflow"][p_init["dataflow"]]
except KeyError:
log.warning("No projects custom preset available...")
colorspace = presets["colorspace"]["default"]
dataflow = presets["dataflow"]["default"]
log.info("Presets `colorspace` and `dataflow` loaded from `default`...")
api.Anatomy = anatomy
api.Dataflow = dataflow
api.Colorspace = colorspace
log.info("Data from templates were Loaded...")
def reset_data_from_templates():
"""
Clear Templates `contextual` data from singleton
object variable
Returns:
singleton: clearing data to None
"""
from . import api
api.Dataflow = None
api.Anatomy = None
api.Colorspace = None
log.info("Data from templates were Unloaded...")
def get_version_from_path(file):
"""
Finds version number in file path string
@ -85,7 +28,7 @@ def get_version_from_path(file):
v: version number in string ('001')
"""
pattern = re.compile(r"_v([0-9]*)")
pattern = re.compile(r"[\._]v([0-9]*)")
try:
v = pattern.findall(file)[0]
return v
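
The widened pattern accepts both underscore- and dot-separated version tokens; a quick sketch:

import re

pattern = re.compile(r"[\._]v([0-9]*)")
pattern.findall("sh010_comp_v012.nk")    # -> ['012']
pattern.findall("sh010_comp.v012.hrox")  # -> ['012'] (newly matched)
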
@ -265,7 +208,9 @@ def set_avalon_workdir(project=None,
if self.SESSION is None:
set_session()
awd = self.SESSION.get("AVALON_WORKDIR", None) or os.getenv("AVALON_WORKDIR", None)
awd = self.SESSION.get("AVALON_WORKDIR", None) or \
os.getenv("AVALON_WORKDIR", None)
data = get_context_data(project, hierarchy, asset, task)
if (not awd) or ("{" not in awd):
@ -280,7 +225,7 @@ def set_avalon_workdir(project=None,
def get_workdir_template(data=None):
"""
Obtain workdir templated path from api.Anatomy singleton
Obtain workdir templated path from Anatomy()
Args:
data (dict, optional): basic contextual data
@ -288,12 +233,8 @@ def get_workdir_template(data=None):
Returns:
string: template path
"""
from . import api
""" Installs singleton data """
load_data_from_templates()
anatomy = api.Anatomy
anatomy = Anatomy()
anatomy_filled = anatomy.format(data or get_context_data())
try: