Merge branch 'develop' into hotfix/publish_new_writes

# Conflicts:
#	pype/plugins/nuke/publish/collect_writes.py
This commit is contained in:
Toke Jepsen 2019-08-05 09:00:39 +01:00
commit 4dbceaa87d
128 changed files with 2159 additions and 2365 deletions

View file

@ -18,29 +18,14 @@ from .action import (
from pypeapp import Logger
from .templates import (
get_project_name,
get_project_code,
get_hierarchy,
get_asset,
get_task,
set_avalon_workdir,
get_version_from_path,
get_workdir_template,
set_hierarchy,
set_project_code
)
from .lib import (
version_up,
get_handle_irregular,
get_project_data,
get_asset_data,
get_asset,
get_project,
get_hierarchy,
get_version_from_path,
modified_environ,
add_tool_to_environment,
get_data_hierarchical_attr,
get_avalon_project_template
add_tool_to_environment
)
# Special naming case for subprocess since its a built-in method.
@ -65,23 +50,12 @@ __all__ = [
# get contextual data
"version_up",
"get_handle_irregular",
"get_project_data",
"get_asset_data",
"get_project_name",
"get_project_code",
"get_project",
"get_hierarchy",
"get_asset",
"get_task",
"set_avalon_workdir",
"get_version_from_path",
"get_workdir_template",
"modified_environ",
"add_tool_to_environment",
"set_hierarchy",
"set_project_code",
"get_data_hierarchical_attr",
"get_avalon_project_template",
"subprocess"
"subprocess"
]

View file

@ -6,6 +6,7 @@ from pyblish import api as pyblish
from pypeapp import execute, Logger
from .. import api
from .lib import set_avalon_workdir
log = Logger().get_logger(__name__, "aport")
@ -33,7 +34,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "aport", "inventory")
def install():
api.set_avalon_workdir()
set_avalon_workdir()
log.info("Registering Aport plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)

View file

@ -80,17 +80,23 @@ def publish(json_data_path, gui):
@pico.expose()
def context(project, asset, task, app):
def context(project_name, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
os.environ["AVALON_PROJECT"] = project_name
io.Session["AVALON_PROJECT"] = project_name
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

135
pype/aport/lib.py Normal file
View file

@ -0,0 +1,135 @@
import os
import re
import sys
from avalon import io, api as avalon, lib as avalonlib
from pype import lib
from pype import api as pype
# from pypeapp.api import (Templates, Logger, format)
from pypeapp import Logger, Anatomy
log = Logger().get_logger(__name__, os.getenv("AVALON_APP", "pype-config"))
def get_asset():
    """
    Obtain asset name from the avalon session or environment variable.

    Returns:
        str: asset name

    Raises:
        AssertionError: when `AVALON_ASSET` is set neither in the avalon
            session nor in ``os.environ``
    """
    lib.set_io_database()
    asset = io.Session.get("AVALON_ASSET", None) \
        or os.getenv("AVALON_ASSET", None)
    log.info("asset: {}".format(asset))
    # NOTE: original message concatenated adjacent string literals without a
    # separating space ("...`AVALON_ASSET`in avalon session..."); fixed here.
    assert asset, log.error("missing `AVALON_ASSET` "
                            "in avalon session "
                            "or os.environ!")
    return asset
def get_context_data(
    project_name=None, hierarchy=None, asset=None, task_name=None
):
    """
    Collect all main contextual data.

    Args:
        project_name (str, optional): project name; when omitted the
            current ``AVALON_PROJECT`` from session/environment is kept
        hierarchy (str, optional): hierarchy path
        asset (str, optional): asset name
        task_name (str, optional): task name

    Returns:
        dict: contextual data (task, asset, project name/code, hierarchy,
            application directory)
    """
    if not task_name:
        lib.set_io_database()
        task_name = io.Session.get("AVALON_TASK", None) \
            or os.getenv("AVALON_TASK", None)
        assert task_name, log.error(
            "missing `AVALON_TASK` in avalon session or os.environ!"
        )

    application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])

    # Only override the active project when one was explicitly passed in;
    # assigning None to os.environ would raise TypeError.
    if project_name:
        os.environ['AVALON_PROJECT'] = project_name
        io.Session['AVALON_PROJECT'] = project_name

    if not hierarchy:
        hierarchy = pype.get_hierarchy()

    project_doc = io.find_one({"type": "project"})

    data = {
        "task": task_name,
        "asset": asset or get_asset(),
        "project": {
            "name": project_doc["name"],
            "code": project_doc["data"].get("code", '')
        },
        "hierarchy": hierarchy,
        "app": application["application_dir"]
    }

    return data
def set_avalon_workdir(
    project=None, hierarchy=None, asset=None, task=None
):
    """
    Update os.environ and avalon session with a filled workdir path.

    Args:
        project (str, optional): project name
        hierarchy (str, optional): hierarchy path
        asset (str, optional): asset name
        task (str, optional): task name

    Side effects:
        os.environ["AVALON_WORKDIR"]: filled workdir path
        io.Session["AVALON_WORKDIR"]: filled workdir path
    """
    lib.set_io_database()
    awd = io.Session.get("AVALON_WORKDIR", None) or \
        os.getenv("AVALON_WORKDIR", None)

    data = get_context_data(project, hierarchy, asset, task)

    # Rebuild the template from Anatomy when no workdir is known yet or the
    # stored value contains no {placeholders} to fill.
    if (not awd) or ("{" not in awd):
        anatomy_filled = Anatomy(io.Session["AVALON_PROJECT"]).format(data)
        awd = anatomy_filled["work"]["folder"]

    # The `format` helper from pypeapp.api is not imported (its import is
    # commented out at the top of this file), so the original call
    # `format(awd, data)` resolved to the builtin and raised TypeError.
    # Fill the template with str.format instead — TODO confirm the template
    # keys match the keys produced by get_context_data().
    awd_filled = os.path.normpath(awd.format(**data))

    io.Session["AVALON_WORKDIR"] = awd_filled
    os.environ["AVALON_WORKDIR"] = awd_filled
    log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled))
def get_workdir_template(data=None):
    """
    Obtain workdir templated path from Anatomy().

    Args:
        data (dict, optional): basic contextual data; collected from the
            current session via get_context_data() when not given

    Returns:
        str: template path, or None when the filled anatomy has no
            "work" entry (the error is logged)
    """
    anatomy = Anatomy()
    anatomy_filled = anatomy.format(data or get_context_data())

    # Initialize so a failed lookup returns None instead of raising
    # NameError at the `return` below (the original left `work` unbound).
    work = None
    try:
        work = anatomy_filled["work"]
    except Exception as e:
        log.error(
            "{0} Error in get_workdir_template(): {1}".format(__name__, str(e))
        )

    return work

View file

@ -82,13 +82,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

View file

@ -81,13 +81,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

View file

@ -0,0 +1,283 @@
import os
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
class AttributesRemapper(BaseAction):
    '''Edit meta data action.

    Renames legacy attribute keys stored in entity "data" documents in the
    avalon Mongo DB to their current names (see `keys_to_change`), for
    projects chosen through an ftrack interface.
    '''

    #: Action identifier.
    identifier = 'attributes.remapper'
    #: Action label.
    label = 'Attributes Remapper'
    #: Action description.
    description = 'Remaps attributes in avalon DB'
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator"]
    icon = '{}/ftrack/action_icons/AttributesRemapper.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )
    # Shared avalon DB connection (class-level, reused across launches)
    db_con = DbConnector()

    # Mapping of obsolete key -> new key. A list value means the source
    # value is copied to every key in the list (e.g. "handles" feeds both
    # handleStart and handleEnd).
    keys_to_change = {
        "fstart": "frameStart",
        "startFrame": "frameStart",
        "edit_in": "frameStart",

        "fend": "frameEnd",
        "endFrame": "frameEnd",
        "edit_out": "frameEnd",

        "handle_start": "handleStart",
        "handle_end": "handleEnd",
        "handles": ["handleEnd", "handleStart"],

        "frameRate": "fps",
        "framerate": "fps",

        "resolution_width": "resolutionWidth",
        "resolution_height": "resolutionHeight",
        "pixel_aspect": "pixelAspect"
    }

    def discover(self, session, entities, event):
        ''' Validation - action is always discoverable. '''
        return True

    def interface(self, session, entities, event):
        """Build the project-selection form shown before launch.

        Returns nothing when the form was already submitted (values are
        present in the event), otherwise returns the items/title dict.
        """
        if event['data'].get('values', {}):
            return

        title = 'Select Projects where attributes should be remapped'

        items = []
        # Enumerator deciding whether checked projects are processed
        # ("selection") or everything except them ("except").
        selection_enum = {
            'label': 'Process type',
            'type': 'enumerator',
            'name': 'process_type',
            'data': [
                {
                    'label': 'Selection',
                    'value': 'selection'
                }, {
                    'label': 'Inverted selection',
                    'value': 'except'
                }
            ],
            'value': 'selection'
        }
        selection_label = {
            'type': 'label',
            'value': (
                'Selection based variants:<br/>'
                '- `Selection` - '
                'NOTHING is processed when nothing is selected<br/>'
                '- `Inverted selection` - '
                'ALL Projects are processed when nothing is selected'
            )
        }
        items.append(selection_enum)
        items.append(selection_label)

        # One checkbox per ftrack project, separated by splitter labels.
        item_splitter = {'type': 'label', 'value': '---'}
        all_projects = session.query('Project').all()
        for project in all_projects:
            item_label = {
                'type': 'label',
                'value': '{} (<i>{}</i>)'.format(
                    project['full_name'], project['name']
                )
            }
            item = {
                'name': project['id'],
                'type': 'boolean',
                'value': False
            }
            if len(items) > 0:
                items.append(item_splitter)
            items.append(item_label)
            items.append(item)

        if len(items) == 0:
            return {
                'success': False,
                'message': 'Didn\'t found any projects'
            }
        else:
            return {
                'items': items,
                'title': title
            }

    def launch(self, session, entities, event):
        """Remap obsolete data keys for the projects chosen in interface().

        Walks every document of each selected project's collection and, for
        relevant entity types, renames keys per `keys_to_change`, then
        writes the updated "data" back with update_many.
        """
        if 'values' not in event['data']:
            return

        values = event['data']['values']
        process_type = values.pop('process_type')
        # selection=True -> process checked projects;
        # selection=False -> process unchecked ones ("except" mode).
        selection = True
        if process_type == 'except':
            selection = False

        interface_messages = {}
        projects_to_update = []
        for project_id, update_bool in values.items():
            if not update_bool and selection:
                continue
            if update_bool and not selection:
                continue
            project = session.query(
                'Project where id is "{}"'.format(project_id)
            ).one()
            projects_to_update.append(project)

        if not projects_to_update:
            self.log.debug('Nothing to update')
            return {
                'success': True,
                'message': 'Nothing to update'
            }

        self.db_con.install()
        # Only these document types carry the keys being remapped.
        relevant_types = ["project", "asset", "version"]
        for ft_project in projects_to_update:
            self.log.debug(
                "Processing project \"{}\"".format(ft_project["full_name"])
            )
            # Point the shared connector at this project's collection.
            self.db_con.Session["AVALON_PROJECT"] = ft_project["full_name"]
            project = self.db_con.find_one({'type': 'project'})
            if not project:
                key = "Projects not synchronized to db"
                if key not in interface_messages:
                    interface_messages[key] = []
                interface_messages[key].append(ft_project["full_name"])
                continue

            # Get all entities in project collection from MongoDB
            _entities = self.db_con.find({})
            for _entity in _entities:
                ent_t = _entity.get("type", "*unknown type")
                name = _entity.get("name", "*unknown name")
                self.log.debug(
                    "- {} ({})".format(name, ent_t)
                )
                # Skip types that do not store keys to change
                if ent_t.lower() not in relevant_types:
                    self.log.debug("-- skipping - type is not relevant")
                    continue
                # Get data which will change
                updating_data = {}
                source_data = _entity["data"]
                for key_from, key_to in self.keys_to_change.items():
                    # continue if final key already exists
                    if type(key_to) == list:
                        # One source value fans out to several new keys.
                        for key in key_to:
                            # continue if final key was set in update_data
                            if key in updating_data:
                                continue
                            # continue if source key not exist or value is None
                            value = source_data.get(key_from)
                            if value is None:
                                continue
                            self.log.debug(
                                "-- changing key {} to {}".format(
                                    key_from,
                                    key
                                )
                            )
                            updating_data[key] = value
                    else:
                        if key_to in source_data:
                            continue
                        # continue if final key was set in update_data
                        if key_to in updating_data:
                            continue
                        # continue if source key not exist or value is None
                        value = source_data.get(key_from)
                        if value is None:
                            continue
                        self.log.debug(
                            "-- changing key {} to {}".format(key_from, key_to)
                        )
                        updating_data[key_to] = value
                # Pop out old keys from entity
                is_obsolete = False
                for key in self.keys_to_change:
                    if key not in source_data:
                        continue
                    is_obsolete = True
                    source_data.pop(key)
                # continue if there is nothing to change
                if not is_obsolete and not updating_data:
                    self.log.debug("-- nothing to change")
                    continue
                source_data.update(updating_data)
                self.db_con.update_many(
                    {"_id": _entity["_id"]},
                    {"$set": {"data": source_data}}
                )

        self.db_con.uninstall()

        if interface_messages:
            self.show_interface_from_dict(
                event, interface_messages, "Errors during remapping attributes"
            )
        return True

    def show_interface_from_dict(self, event, messages, title=""):
        """Render a {heading: message(s)} dict as label items in a popup.

        List values become one paragraph per entry; scalar values one
        paragraph. Empty values are skipped entirely.
        """
        items = []
        for key, value in messages.items():
            if not value:
                continue
            subtitle = {'type': 'label', 'value': '# {}'.format(key)}
            items.append(subtitle)
            if isinstance(value, list):
                for item in value:
                    message = {
                        'type': 'label', 'value': '<p>{}</p>'.format(item)
                    }
                    items.append(message)
            else:
                message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
                items.append(message)
        self.show_interface(event, items, title)
def register(session, **kw):
    '''Register plugin. Called when used as an plugin.'''
    # Only hook up the action when given a genuine ftrack session object.
    if isinstance(session, ftrack_api.session.Session):
        AttributesRemapper(session).register()

View file

@ -7,6 +7,7 @@ import logging
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, get_ca_mongoid
from pypeapp import config
from ftrack_api.exception import NoResultFoundError
"""
This action creates/updates custom attributes.
@ -118,18 +119,11 @@ class CustomAttributes(BaseAction):
os.environ.get('PYPE_STATICS_SERVER', '')
)
def __init__(self, session):
super().__init__(session)
self.types = {}
self.object_type_ids = {}
self.groups = {}
self.security_roles = {}
self.required_keys = ['key', 'label', 'type']
self.type_posibilities = [
'text', 'boolean', 'date', 'enumerator',
'dynamic enumerator', 'number'
]
required_keys = ['key', 'label', 'type']
type_posibilities = [
'text', 'boolean', 'date', 'enumerator',
'dynamic enumerator', 'number'
]
def discover(self, session, entities, event):
'''
@ -139,8 +133,12 @@ class CustomAttributes(BaseAction):
return True
def launch(self, session, entities, event):
# JOB SETTINGS
self.types = {}
self.object_type_ids = {}
self.groups = {}
self.security_roles = {}
# JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
@ -159,11 +157,14 @@ class CustomAttributes(BaseAction):
job['status'] = 'done'
session.commit()
except Exception as e:
except Exception as exc:
session.rollback()
job['status'] = 'failed'
session.commit()
self.log.error('Creating custom attributes failed ({})'.format(e))
self.log.error(
'Creating custom attributes failed ({})'.format(exc),
exc_info=True
)
return True
@ -226,24 +227,30 @@ class CustomAttributes(BaseAction):
def custom_attributes_from_file(self, session, event):
presets = config.get_presets()['ftrack']['ftrack_custom_attributes']
for cust_attr_name in presets:
for cust_attr_data in presets:
cust_attr_name = cust_attr_data.get(
'label',
cust_attr_data.get('key')
)
try:
data = {}
cust_attr = presets[cust_attr_name]
# Get key, label, type
data.update(self.get_required(cust_attr))
data.update(self.get_required(cust_attr_data))
# Get hierachical/ entity_type/ object_id
data.update(self.get_entity_type(cust_attr))
data.update(self.get_entity_type(cust_attr_data))
# Get group, default, security roles
data.update(self.get_optional(cust_attr))
data.update(self.get_optional(cust_attr_data))
# Process data
self.process_attribute(data)
except CustAttrException as cae:
msg = 'Custom attribute error "{}" - {}'.format(
cust_attr_name, str(cae)
)
self.log.warning(msg)
if cust_attr_name:
msg = 'Custom attribute error "{}" - {}'.format(
cust_attr_name, str(cae)
)
else:
msg = 'Custom attribute error - {}'.format(str(cae))
self.log.warning(msg, exc_info=True)
self.show_message(event, msg)
return True
@ -422,9 +429,10 @@ class CustomAttributes(BaseAction):
def get_security_role(self, security_roles):
roles = []
if len(security_roles) == 0 or security_roles[0] == 'ALL':
security_roles_lowered = [role.lower() for role in security_roles]
if len(security_roles) == 0 or 'all' in security_roles_lowered:
roles = self.get_role_ALL()
elif security_roles[0] == 'except':
elif security_roles_lowered[0] == 'except':
excepts = security_roles[1:]
all = self.get_role_ALL()
for role in all:
@ -443,10 +451,10 @@ class CustomAttributes(BaseAction):
role = self.session.query(query).one()
self.security_roles[role_name] = role
roles.append(role)
except Exception:
raise CustAttrException(
'Securit role "{}" does not exist'.format(role_name)
)
except NoResultFoundError:
raise CustAttrException((
'Securit role "{}" does not exist'
).format(role_name))
return roles

View file

@ -28,7 +28,7 @@ class SyncHierarchicalAttrs(BaseAction):
)
#: roles that are allowed to register this action
role_list = ['Administrator']
role_list = ['Pypeclub', 'Administrator', 'Project Manager']
def discover(self, session, entities, event):
''' Validation '''
@ -41,6 +41,7 @@ class SyncHierarchicalAttrs(BaseAction):
return False
def launch(self, session, entities, event):
self.interface_messages = {}
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
@ -53,13 +54,27 @@ class SyncHierarchicalAttrs(BaseAction):
})
})
session.commit()
self.log.debug('Job with id "{}" created'.format(job['id']))
process_session = ftrack_api.Session(
server_url=session.server_url,
api_key=session.api_key,
api_user=session.api_user,
auto_connect_event_hub=True
)
try:
# Collect hierarchical attrs
self.log.debug('Collecting Hierarchical custom attributes started')
custom_attributes = {}
all_avalon_attr = session.query(
all_avalon_attr = process_session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
error_key = (
'Hierarchical attributes with set "default" value (not allowed)'
)
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
@ -68,6 +83,12 @@ class SyncHierarchicalAttrs(BaseAction):
continue
if cust_attr['default']:
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
cust_attr['label']
)
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
@ -76,6 +97,10 @@ class SyncHierarchicalAttrs(BaseAction):
custom_attributes[cust_attr['key']] = cust_attr
self.log.debug(
'Collecting Hierarchical custom attributes has finished'
)
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
@ -93,28 +118,61 @@ class SyncHierarchicalAttrs(BaseAction):
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
for entity in entities:
_entities = self._get_entities(event, process_session)
for entity in _entities:
self.log.debug(30*'-')
self.log.debug(
'Processing entity "{}"'.format(entity.get('name', entity))
)
ent_name = entity.get('name', entity)
if entity.entity_type.lower() == 'project':
ent_name = entity['full_name']
for key in custom_attributes:
self.log.debug(30*'*')
self.log.debug(
'Processing Custom attribute key "{}"'.format(key)
)
# check if entity has that attribute
if key not in entity['custom_attributes']:
self.log.debug(
'Hierachical attribute "{}" not found on "{}"'.format(
key, entity.get('name', entity)
)
error_key = 'Missing key on entities'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
'- key: "{}" - entity: "{}"'.format(key, ent_name)
)
self.log.error((
'- key "{}" not found on "{}"'
).format(key, ent_name))
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
self.log.warning(
'Hierarchical attribute "{}" not set on "{}"'.format(
key, entity.get('name', entity)
)
error_key = (
'Missing value for key on entity'
' and its parents (synchronization was skipped)'
)
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
'- key: "{}" - entity: "{}"'.format(key, ent_name)
)
self.log.warning((
'- key "{}" not set on "{}" or its parents'
).format(key, ent_name))
continue
self.update_hierarchical_attribute(entity, key, value)
job['status'] = 'done'
session.commit()
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
@ -127,6 +185,8 @@ class SyncHierarchicalAttrs(BaseAction):
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
if self.interface_messages:
self.show_interface_from_dict(self.interface_messages, event)
return True
@ -146,6 +206,27 @@ class SyncHierarchicalAttrs(BaseAction):
entity.entity_type.lower() == 'task'
):
return
ent_name = entity.get('name', entity)
if entity.entity_type.lower() == 'project':
ent_name = entity['full_name']
hierarchy = '/'.join(
[a['name'] for a in entity.get('ancestors', [])]
)
if hierarchy:
hierarchy = '/'.join(
[entity['project']['full_name'], hierarchy, entity['name']]
)
elif entity.entity_type.lower() == 'project':
hierarchy = entity['full_name']
else:
hierarchy = '/'.join(
[entity['project']['full_name'], entity['name']]
)
self.log.debug('- updating entity "{}"'.format(hierarchy))
# collect entity's custom attributes
custom_attributes = entity.get('custom_attributes')
if not custom_attributes:
@ -153,24 +234,49 @@ class SyncHierarchicalAttrs(BaseAction):
mongoid = custom_attributes.get(self.ca_mongoid)
if not mongoid:
self.log.debug('Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
))
error_key = 'Missing MongoID on entities (try SyncToAvalon first)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'-- entity "{}" is not synchronized to avalon. Skipping'.format(
ent_name
)
)
return
try:
mongoid = ObjectId(mongoid)
except Exception:
self.log.warning('Entity "{}" has stored invalid MongoID.'.format(
entity.get('name', entity)
))
error_key = 'Invalid MongoID on entities (try SyncToAvalon)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'-- entity "{}" has stored invalid MongoID. Skipping'.format(
ent_name
)
)
return
# Find entity in Mongo DB
mongo_entity = self.db_con.find_one({'_id': mongoid})
if not mongo_entity:
error_key = 'Entities not found in Avalon DB (try SyncToAvalon)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
'-- entity "{}" was not found in DB by id "{}". Skipping'.format(
ent_name, str(mongoid)
)
)
return
@ -188,6 +294,10 @@ class SyncHierarchicalAttrs(BaseAction):
{'$set': {'data': data}}
)
self.log.debug(
'-- stored value "{}"'.format(value)
)
for child in entity.get('children', []):
self.update_hierarchical_attribute(child, key, value)

View file

@ -11,12 +11,10 @@ from pype.ftrack import BaseAction
from avalon import io, inventory, schema
ignore_me = True
class TestAction(BaseAction):
'''Edit meta data action.'''
ignore_me = True
#: Action identifier.
identifier = 'test.action'
#: Action label.

View file

@ -0,0 +1,54 @@
import os
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pype.vendor.ftrack_api import session as fa_session
class ActionAskWhereIRun(BaseAction):
    """ Sometimes user forget where pipeline with his credentials is running.

    - this action triggers `ActionShowWhereIRun` by publishing an
      `ftrack.action.launch` event for identifier "show.where.i.run"
    """

    # Action is ignored by default
    ignore_me = True
    #: Action identifier.
    identifier = 'ask.where.i.run'
    #: Action label.
    label = 'Ask where I run'
    #: Action description.
    description = 'Triggers PC info where user have running Pype'
    #: Action icon
    icon = '{}/ftrack/action_icons/ActionAskWhereIRun.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )

    def discover(self, session, entities, event):
        """ Hide by default - Should be enabled only if you want to run.

        - best practise is to create another action that triggers this one
        """
        return True

    def launch(self, session, entities, event):
        # Re-publish the launch as a "show.where.i.run" action event; the
        # event_hub_id lets the receiver skip replies from this very session.
        # NOTE: the local `event` deliberately rebinds the parameter after
        # its selection has been read.
        event = fa_session.ftrack_api.event.base.Event(
            topic='ftrack.action.launch',
            data=dict(
                actionIdentifier="show.where.i.run",
                selection=event["data"]["selection"],
                event_hub_id=session.event_hub.id
            ),
            source=dict(
                user=dict(username=session.api_user)
            )
        )
        session.event_hub.publish(event, on_error='ignore')

        return True
def register(session, **kw):
    '''Register plugin. Called when used as an plugin.'''
    # Skip registration for anything that is not a real ftrack session.
    if isinstance(session, ftrack_api.session.Session):
        ActionAskWhereIRun(session).register()

View file

@ -0,0 +1,86 @@
import platform
import socket
import getpass
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
class ActionShowWhereIRun(BaseAction):
    """ Sometimes user forget where pipeline with his credentials is running.

    - this action shows on which PC, Username and IP is running
    - requirement action MUST be registered where we want to locate the PC:
    - - can't be used retrospectively...
    """

    #: Action identifier.
    identifier = 'show.where.i.run'
    #: Action label.
    label = 'Show where I run'
    #: Action description.
    description = 'Shows PC info where user have running Pype'

    def discover(self, session, entities, event):
        """ Hide by default - Should be enabled only if you want to run.

        - best practise is to create another action that triggers this one
        """
        return False

    def launch(self, session, entities, event):
        """Collect host/user info and present it via show_interface.

        Each lookup is best-effort: failures are silently skipped and the
        missing entries are rendered as "-Undefined-".
        """
        # Don't show info when was launch from this session
        if session.event_hub.id == event.get("data", {}).get("event_hub_id"):
            return True

        title = "Where Do I Run?"
        msgs = {}
        all_keys = ["Hostname", "IP", "Username", "System name", "PC name"]

        # Hostname / IP — may fail e.g. when name resolution is unavailable.
        try:
            host_name = socket.gethostname()
            msgs["Hostname"] = host_name
            host_ip = socket.gethostbyname(host_name)
            msgs["IP"] = host_ip
        except Exception:
            pass

        # OS name and machine (node) name from platform.uname().
        try:
            system_name, pc_name, *_ = platform.uname()
            msgs["System name"] = system_name
            msgs["PC name"] = pc_name
        except Exception:
            pass

        try:
            msgs["Username"] = getpass.getuser()
        except Exception:
            pass

        # Fill placeholders so every expected key shows up in the output.
        for key in all_keys:
            if not msgs.get(key):
                msgs[key] = "-Undefined-"

        # Build label items: <h3>key</h3><p>value</p>, "---" between entries.
        items = []
        first = True
        splitter = {'type': 'label', 'value': '---'}
        for key, value in msgs.items():
            if first:
                first = False
            else:
                items.append(splitter)
            self.log.debug("{}: {}".format(key, value))
            subtitle = {'type': 'label', 'value': '<h3>{}</h3>'.format(key)}
            items.append(subtitle)
            message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
            items.append(message)

        self.show_interface(items, title, event=event)

        return True
def register(session, **kw):
    '''Register plugin. Called when used as an plugin.'''
    # Guard clause inverted: register only for a genuine ftrack session.
    if isinstance(session, ftrack_api.session.Session):
        ActionShowWhereIRun(session).register()

View file

@ -61,7 +61,7 @@ class SyncHierarchicalAttrs(BaseAction):
if role['security_role']['name'] in role_list:
role_check = True
break
print(self.icon)
if role_check is True:
for entity in entities:
context_type = entity.get('context_type', '').lower()
@ -75,6 +75,8 @@ class SyncHierarchicalAttrs(BaseAction):
return discover
def launch(self, session, entities, event):
self.interface_messages = {}
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
@ -87,13 +89,26 @@ class SyncHierarchicalAttrs(BaseAction):
})
})
session.commit()
self.log.debug('Job with id "{}" created'.format(job['id']))
process_session = ftrack_api.Session(
server_url=session.server_url,
api_key=session.api_key,
api_user=session.api_user,
auto_connect_event_hub=True
)
try:
# Collect hierarchical attrs
self.log.debug('Collecting Hierarchical custom attributes started')
custom_attributes = {}
all_avalon_attr = session.query(
all_avalon_attr = process_session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
error_key = (
'Hierarchical attributes with set "default" value (not allowed)'
)
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
@ -102,6 +117,12 @@ class SyncHierarchicalAttrs(BaseAction):
continue
if cust_attr['default']:
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
cust_attr['label']
)
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
@ -110,6 +131,10 @@ class SyncHierarchicalAttrs(BaseAction):
custom_attributes[cust_attr['key']] = cust_attr
self.log.debug(
'Collecting Hierarchical custom attributes has finished'
)
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
@ -127,28 +152,61 @@ class SyncHierarchicalAttrs(BaseAction):
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
for entity in entities:
_entities = self._get_entities(event, process_session)
for entity in _entities:
self.log.debug(30*'-')
self.log.debug(
'Processing entity "{}"'.format(entity.get('name', entity))
)
ent_name = entity.get('name', entity)
if entity.entity_type.lower() == 'project':
ent_name = entity['full_name']
for key in custom_attributes:
self.log.debug(30*'*')
self.log.debug(
'Processing Custom attribute key "{}"'.format(key)
)
# check if entity has that attribute
if key not in entity['custom_attributes']:
self.log.debug(
'Hierachical attribute "{}" not found on "{}"'.format(
key, entity.get('name', entity)
)
error_key = 'Missing key on entities'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
'- key: "{}" - entity: "{}"'.format(key, ent_name)
)
self.log.error((
'- key "{}" not found on "{}"'
).format(key, entity.get('name', entity)))
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
self.log.warning(
'Hierarchical attribute "{}" not set on "{}"'.format(
key, entity.get('name', entity)
)
error_key = (
'Missing value for key on entity'
' and its parents (synchronization was skipped)'
)
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
self.interface_messages[error_key].append(
'- key: "{}" - entity: "{}"'.format(key, ent_name)
)
self.log.warning((
'- key "{}" not set on "{}" or its parents'
).format(key, ent_name))
continue
self.update_hierarchical_attribute(entity, key, value)
job['status'] = 'done'
session.commit()
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
@ -161,6 +219,9 @@ class SyncHierarchicalAttrs(BaseAction):
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
if self.interface_messages:
self.show_interface_from_dict(self.interface_messages, event)
return True
@ -180,6 +241,27 @@ class SyncHierarchicalAttrs(BaseAction):
entity.entity_type.lower() == 'task'
):
return
ent_name = entity.get('name', entity)
if entity.entity_type.lower() == 'project':
ent_name = entity['full_name']
hierarchy = '/'.join(
[a['name'] for a in entity.get('ancestors', [])]
)
if hierarchy:
hierarchy = '/'.join(
[entity['project']['full_name'], hierarchy, entity['name']]
)
elif entity.entity_type.lower() == 'project':
hierarchy = entity['full_name']
else:
hierarchy = '/'.join(
[entity['project']['full_name'], entity['name']]
)
self.log.debug('- updating entity "{}"'.format(hierarchy))
# collect entity's custom attributes
custom_attributes = entity.get('custom_attributes')
if not custom_attributes:
@ -187,24 +269,49 @@ class SyncHierarchicalAttrs(BaseAction):
mongoid = custom_attributes.get(self.ca_mongoid)
if not mongoid:
self.log.debug('Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
))
error_key = 'Missing MongoID on entities (try SyncToAvalon first)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'-- entity "{}" is not synchronized to avalon. Skipping'.format(
ent_name
)
)
return
try:
mongoid = ObjectId(mongoid)
except Exception:
self.log.warning('Entity "{}" has stored invalid MongoID.'.format(
entity.get('name', entity)
))
error_key = 'Invalid MongoID on entities (try SyncToAvalon)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'-- entity "{}" has stored invalid MongoID. Skipping'.format(
ent_name
)
)
return
# Find entity in Mongo DB
mongo_entity = self.db_con.find_one({'_id': mongoid})
if not mongo_entity:
error_key = 'Entities not found in Avalon DB (try SyncToAvalon)'
if error_key not in self.interface_messages:
self.interface_messages[error_key] = []
if ent_name not in self.interface_messages[error_key]:
self.interface_messages[error_key].append(ent_name)
self.log.warning(
'Entity "{}" is not synchronized to avalon.'.format(
entity.get('name', entity)
'-- entity "{}" was not found in DB by id "{}". Skipping'.format(
ent_name, str(mongoid)
)
)
return

View file

@ -2,11 +2,10 @@ from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent
ignore_me = True
class Radio_buttons(BaseEvent):
ignore_me = True
def launch(self, session, event):
'''Provides a readio button behaviour to any bolean attribute in
radio_button group.'''
@ -34,6 +33,7 @@ class Radio_buttons(BaseEvent):
session.commit()
def register(session):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):

View file

@ -112,7 +112,7 @@ class Sync_to_Avalon(BaseEvent):
{'type': 'label', 'value': '# Fatal Error'},
{'type': 'label', 'value': '<p>{}</p>'.format(ftrack_message)}
]
self.show_interface(event, items, title)
self.show_interface(items, title, event=event)
self.log.error('Fatal error during sync: {}'.format(message))
return

View file

@ -5,11 +5,10 @@ from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent
ignore_me = True
class Test_Event(BaseEvent):
ignore_me = True
priority = 10000
def launch(self, session, event):

View file

@ -80,23 +80,16 @@ class FtrackServer():
if '.pyc' in file or '.py' not in file:
continue
ignore = 'ignore_me'
mod = importlib.import_module(os.path.splitext(file)[0])
importlib.reload(mod)
mod_functions = dict(
[
(name, function)
for name, function in mod.__dict__.items()
if isinstance(function, types.FunctionType) or
name == ignore
if isinstance(function, types.FunctionType)
]
)
# Don't care about ignore_me files
if (
ignore in mod_functions and
mod_functions[ignore] is True
):
continue
# separate files by register function
if 'register' not in mod_functions:
msg = (

View file

@ -1,14 +1,13 @@
import os
import re
import json
from pype import lib as pypelib
from pype.lib import get_avalon_database
from bson.objectid import ObjectId
import avalon
import avalon.api
from avalon import schema
from avalon.vendor import toml, jsonschema
from pypeapp import Logger
from pypeapp import Logger, Anatomy, config
ValidationError = jsonschema.ValidationError
@ -53,8 +52,8 @@ def import_to_avalon(
if entity_type in ['Project']:
type = 'project'
config = get_project_config(entity)
schema.validate(config)
proj_config = get_project_config(entity)
schema.validate(proj_config)
av_project_code = None
if av_project is not None and 'code' in av_project['data']:
@ -62,13 +61,12 @@ def import_to_avalon(
ft_project_code = ft_project['name']
if av_project is None:
project_schema = pypelib.get_avalon_project_template_schema()
item = {
'schema': project_schema,
'schema': "avalon-core:project-2.0",
'type': type,
'name': project_name,
'data': dict(),
'config': config,
'config': proj_config,
'parent': None,
}
schema.validate(item)
@ -214,9 +212,8 @@ def import_to_avalon(
{'type': 'asset', 'name': name}
)
if avalon_asset is None:
asset_schema = pypelib.get_avalon_asset_template_schema()
item = {
'schema': asset_schema,
'schema': "avalon-core:asset-2.0",
'name': name,
'silo': silo,
'parent': ObjectId(projectId),
@ -345,13 +342,12 @@ def changeability_check_childs(entity):
childs = entity['children']
for child in childs:
if child.entity_type.lower() == 'task':
config = get_config_data()
if 'sync_to_avalon' in config:
config = config['sync_to_avalon']
if 'statuses_name_change' in config:
available_statuses = config['statuses_name_change']
else:
available_statuses = []
available_statuses = config.get_presets().get(
"ftrack", {}).get(
"ftrack_config", {}).get(
"sync_to_avalon", {}).get(
"statuses_name_change", []
)
ent_status = child['status']['name'].lower()
if ent_status not in available_statuses:
return False
@ -480,14 +476,28 @@ def get_avalon_project(ft_project):
return avalon_project
def get_project_config(entity):
config = {}
config['schema'] = pypelib.get_avalon_project_config_schema()
config['tasks'] = get_tasks(entity)
config['apps'] = get_project_apps(entity)
config['template'] = pypelib.get_avalon_project_template()
def get_avalon_project_template():
"""Get avalon template
return config
Returns:
dictionary with templates
"""
templates = Anatomy().templates
return {
'workfile': templates["avalon"]["workfile"],
'work': templates["avalon"]["work"],
'publish': templates["avalon"]["publish"]
}
def get_project_config(entity):
proj_config = {}
proj_config['schema'] = 'avalon-core:config-1.0'
proj_config['tasks'] = get_tasks(entity)
proj_config['apps'] = get_project_apps(entity)
proj_config['template'] = get_avalon_project_template()
return proj_config
def get_tasks(project):
@ -539,7 +549,7 @@ def avalon_check_name(entity, inSchema=None):
if entity.entity_type in ['Project']:
# data['type'] = 'project'
name = entity['full_name']
# schema = get_avalon_project_template_schema()
# schema = "avalon-core:project-2.0"
data['silo'] = 'Film'
@ -557,24 +567,6 @@ def avalon_check_name(entity, inSchema=None):
raise ValueError(msg.format(name))
def get_config_data():
path_items = [pypelib.get_presets_path(), 'ftrack', 'ftrack_config.json']
filepath = os.path.sep.join(path_items)
data = dict()
try:
with open(filepath) as data_file:
data = json.load(data_file)
except Exception as e:
msg = (
'Loading "Ftrack Config file" Failed.'
' Please check log for more information.'
)
log.warning("{} - {}".format(msg, str(e)))
return data
def show_errors(obj, event, errors):
title = 'Hey You! You raised few Errors! (*look below*)'
items = []
@ -596,4 +588,4 @@ def show_errors(obj, event, errors):
obj.log.error(
'{}: {}'.format(key, message)
)
obj.show_interface(event, items, title)
obj.show_interface(items, title, event=event)

View file

@ -5,7 +5,7 @@ from avalon import lib as avalonlib
import acre
from pype import api as pype
from pype import lib as pypelib
from .avalon_sync import get_config_data
from pypeapp import config
from .ftrack_base_handler import BaseHandler
from pypeapp import Anatomy
@ -225,7 +225,13 @@ class AppAction(BaseHandler):
self.log.exception(
"{0} Error in anatomy.format: {1}".format(__name__, e)
)
os.environ["AVALON_WORKDIR"] = os.path.normpath(work_template)
workdir = os.path.normpath(work_template)
os.environ["AVALON_WORKDIR"] = workdir
try:
os.makedirs(workdir)
except FileExistsError:
pass
# collect all parents from the task
parents = []
@ -328,10 +334,10 @@ class AppAction(BaseHandler):
pass
# Change status of task to In progress
config = get_config_data()
presets = config.get_presets()["ftrack"]["ftrack_config"]
if 'status_update' in config:
statuses = config['status_update']
if 'status_update' in presets:
statuses = presets['status_update']
actual_status = entity['status']['name'].lower()
next_status_name = None
@ -351,7 +357,7 @@ class AppAction(BaseHandler):
session.commit()
except Exception:
msg = (
'Status "{}" in config wasn\'t found on Ftrack'
'Status "{}" in presets wasn\'t found on Ftrack'
).format(next_status_name)
self.log.warning(msg)

View file

@ -26,6 +26,7 @@ class BaseHandler(object):
priority = 100
# Type is just for logging purpose (e.g.: Action, Event, Application,...)
type = 'No-type'
ignore_me = False
preactions = []
def __init__(self, session):
@ -41,6 +42,8 @@ class BaseHandler(object):
def register_decorator(self, func):
@functools.wraps(func)
def wrapper_register(*args, **kwargs):
if self.ignore_me:
return
label = self.__class__.__name__
if hasattr(self, 'label'):
if self.variant is None:
@ -194,7 +197,6 @@ class BaseHandler(object):
def _translate_event(self, session, event):
'''Return *event* translated structure to be used with the API.'''
'''Return *event* translated structure to be used with the API.'''
_entities = event['data'].get('entities_object', None)
if (
_entities is None or
@ -209,25 +211,28 @@ class BaseHandler(object):
event
]
def _get_entities(self, event):
self.session._local_cache.clear()
selection = event['data'].get('selection', [])
def _get_entities(self, event, session=None):
if session is None:
session = self.session
session._local_cache.clear()
selection = event['data'].get('selection') or []
_entities = []
for entity in selection:
_entities.append(
self.session.get(
self._get_entity_type(entity),
entity.get('entityId')
)
)
_entities.append(session.get(
self._get_entity_type(entity, session),
entity.get('entityId')
))
event['data']['entities_object'] = _entities
return _entities
def _get_entity_type(self, entity):
def _get_entity_type(self, entity, session=None):
'''Return translated entity type tht can be used with API.'''
# Get entity type and make sure it is lower cased. Most places except
# the component tab in the Sidebar will use lower case notation.
entity_type = entity.get('entityType').replace('_', '').lower()
if session is None:
session = self.session
for schema in self.session.schemas:
alias_for = schema.get('alias_for')
@ -430,12 +435,47 @@ class BaseHandler(object):
on_error='ignore'
)
def show_interface(self, event, items, title=''):
def show_interface(
self, items, title='',
event=None, user=None, username=None, user_id=None
):
"""
Shows interface to user who triggered event
Shows interface to user
- to identify user must be entered one of args:
event, user, username, user_id
- 'items' must be list containing Ftrack interface items
"""
user_id = event['source']['user']['id']
if not any([event, user, username, user_id]):
raise TypeError((
'Missing argument `show_interface` requires one of args:'
' event (ftrack_api Event object),'
' user (ftrack_api User object)'
' username (string) or user_id (string)'
))
if event:
user_id = event['source']['user']['id']
elif user:
user_id = user['id']
else:
if user_id:
key = 'id'
value = user_id
else:
key = 'username'
value = username
user = self.session.query(
'User where {} is "{}"'.format(key, value)
).first()
if not user:
raise TypeError((
'Ftrack user with {} "{}" was not found!'.format(key, value)
))
user_id = user['id']
target = (
'applicationId=ftrack.client.web and user.id="{0}"'
).format(user_id)
@ -452,3 +492,33 @@ class BaseHandler(object):
),
on_error='ignore'
)
def show_interface_from_dict(
self, messages, event=None, user=None, username=None, user_id=None
):
if not messages:
self.log.debug("No messages to show! (messages dict is empty)")
return
items = []
title = 'Errors during mirroring'
splitter = {'type': 'label', 'value': '---'}
first = True
for key, value in messages.items():
if not first:
items.append(splitter)
else:
first = False
subtitle = {'type': 'label', 'value':'<h3>{}</h3>'.format(key)}
items.append(subtitle)
if isinstance(value, list):
for item in value:
message = {
'type': 'label', 'value': '<p>{}</p>'.format(item)
}
items.append(message)
else:
message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
items.append(message)
self.show_interface(items, title, event, user, username, user_id)

View file

@ -25,9 +25,12 @@ class BaseEvent(BaseHandler):
def wrapper_launch(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
self.log.info('{} Failed ({})'.format(
self.__class__.__name__, str(e))
except Exception as exc:
self.log.error(
'Event "{}" Failed: {}'.format(
self.__class__.__name__, str(exc)
),
exc_info=True
)
return wrapper_launch
@ -43,22 +46,7 @@ class BaseEvent(BaseHandler):
self.session.rollback()
self.session._local_cache.clear()
try:
self.launch(
self.session, event
)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log_message = "{}/{}/Line: {}".format(
exc_type, fname, exc_tb.tb_lineno
)
self.log.error(
'Error during syncToAvalon: {}'.format(log_message),
exc_info=True
)
return
self.launch(self.session, event)
def _translate_event(self, session, event):
'''Return *event* translated structure to be used with the API.'''

View file

@ -138,8 +138,8 @@ def update_frame_range(comp, representations):
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
start = min(v["data"]["startFrame"] for v in versions)
end = max(v["data"]["endFrame"] for v in versions)
start = min(v["data"]["frameStart"] for v in versions)
end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)

View file

@ -10,10 +10,7 @@ from avalon.houdini import pipeline as houdini
from pype.houdini import lib
from pype.lib import (
any_outdated,
update_task_from_path
)
from pype.lib import any_outdated
PARENT_DIR = os.path.dirname(__file__)
@ -57,8 +54,6 @@ def on_save(*args):
avalon.logger.info("Running callback on save..")
update_task_from_path(hou.hipFile.path())
nodes = lib.get_id_required_nodes()
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
@ -68,8 +63,6 @@ def on_open(*args):
avalon.logger.info("Running callback on open..")
update_task_from_path(hou.hipFile.path())
if any_outdated():
from ..widgets import popup

View file

@ -205,7 +205,7 @@ def validate_fps():
"""
fps = lib.get_asset_fps()
fps = lib.get_asset()["data"]["fps"]
current_fps = hou.fps() # returns float
if current_fps != fps:

View file

@ -34,11 +34,32 @@ def _subprocess(args):
raise ValueError("\"{}\" was not successful: {}".format(args, output))
def get_handle_irregular(asset):
data = asset["data"]
handle_start = data.get("handle_start", 0)
handle_end = data.get("handle_end", 0)
return (handle_start, handle_end)
def get_hierarchy(asset_name=None):
"""
Obtain asset hierarchy path string from mongo db
Returns:
string: asset hierarchy path
"""
if not asset_name:
asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])
asset = io.find_one({
"type": 'asset',
"name": asset_name
})
hierarchy_items = []
entity = asset
while True:
parent_id = entity.get("data", {}).get("visualParent")
if not parent_id:
break
entity = io.find_one({"_id": parent_id})
hierarchy_items.append(entity["name"])
return "/".join(hierarchy_items)
def add_tool_to_environment(tools):
@ -157,45 +178,6 @@ def any_outdated():
return False
def update_task_from_path(path):
"""Update the context using the current scene state.
When no changes to the context it will not trigger an update.
When the context for a file could not be parsed an error is logged but not
raised.
"""
if not path:
log.warning("Can't update the current task. Scene is not saved.")
return
# Find the current context from the filename
project = io.find_one({"type": "project"},
projection={"config.template.work": True})
template = project['config']['template']['work']
# Force to use the registered to root to avoid using wrong paths
template = pather.format(template, {"root": avalon.api.registered_root()})
try:
context = pather.parse(template, path)
except ParseError:
log.error("Can't update the current task. Unable to parse the "
"task for: %s (pattern: %s)", path, template)
return
# Find the changes between current Session and the path's context.
current = {
"asset": avalon.api.Session["AVALON_ASSET"],
"task": avalon.api.Session["AVALON_TASK"]
# "app": avalon.api.Session["AVALON_APP"]
}
changes = {key: context[key] for key, current_value in current.items()
if context[key] != current_value}
if changes:
log.info("Updating work task to: %s", context)
avalon.api.update_current_task(**changes)
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times"""
return b.join(s.rsplit(a, n))
@ -215,7 +197,7 @@ def version_up(filepath):
dirname = os.path.dirname(filepath)
basename, ext = os.path.splitext(os.path.basename(filepath))
regex = "[._]v\d+"
regex = r"[._]v\d+"
matches = re.findall(regex, str(basename), re.IGNORECASE)
if not matches:
log.info("Creating version...")
@ -223,7 +205,7 @@ def version_up(filepath):
new_basename = "{}{}".format(basename, new_label)
else:
label = matches[-1]
version = re.search("\d+", label).group()
version = re.search(r"\d+", label).group()
padding = len(version)
new_version = int(version) + 1
@ -331,140 +313,107 @@ def _get_host_name():
return _host.__name__.rsplit(".", 1)[-1]
def collect_container_metadata(container):
"""Add additional data based on the current host
def get_asset(asset_name=None):
entity_data_keys_from_project_when_miss = [
"frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
"resolutionWidth", "resolutionHeight"
]
If the host application's lib module does not have a function to inject
additional data it will return the input container
entity_keys_from_project_when_miss = []
alternatives = {
"handleStart": "handles",
"handleEnd": "handles"
}
defaults = {
"handleStart": 0,
"handleEnd": 0
}
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
project_document = io.find_one({"type": "project"})
for key in entity_data_keys_from_project_when_miss:
if asset_document["data"].get(key):
continue
value = project_document["data"].get(key)
if value is not None or key not in alternatives:
asset_document["data"][key] = value
continue
alt_key = alternatives[key]
value = asset_document["data"].get(alt_key)
if value is not None:
asset_document["data"][key] = value
continue
value = project_document["data"].get(alt_key)
if value:
asset_document["data"][key] = value
continue
if key in defaults:
asset_document["data"][key] = defaults[key]
for key in entity_keys_from_project_when_miss:
if asset_document.get(key):
continue
value = project_document.get(key)
if value is not None or key not in alternatives:
asset_document[key] = value
continue
alt_key = alternatives[key]
value = asset_document.get(alt_key)
if value:
asset_document[key] = value
continue
value = project_document.get(alt_key)
if value:
asset_document[key] = value
continue
if key in defaults:
asset_document[key] = defaults[key]
return asset_document
def get_project():
io.install()
return io.find_one({"type": "project"})
def get_version_from_path(file):
"""
Finds version number in file path string
Args:
container (dict): collection if representation data in host
file (string): file path
Returns:
generator
"""
# TODO: Improve method of getting the host lib module
host_name = _get_host_name()
package_name = "pype.{}.lib".format(host_name)
hostlib = importlib.import_module(package_name)
if not hasattr(hostlib, "get_additional_data"):
return {}
return hostlib.get_additional_data(container)
def get_asset_fps():
"""Returns project's FPS, if not found will return 25 by default
Returns:
int, float
v: version number in string ('001')
"""
key = "fps"
# FPS from asset data (if set)
asset_data = get_asset_data()
if key in asset_data:
return asset_data[key]
# FPS from project data (if set)
project_data = get_project_data()
if key in project_data:
return project_data[key]
# Fallback to 25 FPS
return 25.0
def get_project_data():
"""Get the data of the current project
The data of the project can contain things like:
resolution
fps
renderer
Returns:
dict:
"""
project_name = io.active_project()
project = io.find_one({"name": project_name,
"type": "project"},
projection={"data": True})
data = project.get("data", {})
return data
def get_asset_data(asset=None):
"""Get the data from the current asset
Args:
asset(str, Optional): name of the asset, eg:
Returns:
dict
"""
asset_name = asset or avalon.api.Session["AVALON_ASSET"]
document = io.find_one({"name": asset_name,
"type": "asset"})
data = document.get("data", {})
return data
def get_data_hierarchical_attr(entity, attr_name):
vp_attr = 'visualParent'
data = entity['data']
value = data.get(attr_name, None)
if value is not None:
return value
elif vp_attr in data:
if data[vp_attr] is None:
parent_id = entity['parent']
else:
parent_id = data[vp_attr]
parent = io.find_one({"_id": parent_id})
return get_data_hierarchical_attr(parent, attr_name)
else:
return None
def get_avalon_project_config_schema():
schema = 'avalon-core:config-1.0'
return schema
def get_avalon_project_template_schema():
schema = "avalon-core:project-2.0"
return schema
def get_avalon_project_template():
from pypeapp import Anatomy
"""
Get avalon template
Returns:
dictionary with templates
"""
templates = Anatomy().templates
proj_template = {}
proj_template['workfile'] = templates["avalon"]["workfile"]
proj_template['work'] = templates["avalon"]["work"]
proj_template['publish'] = templates["avalon"]["publish"]
return proj_template
def get_avalon_asset_template_schema():
schema = "avalon-core:asset-2.0"
return schema
pattern = re.compile(r"[\._]v([0-9]*)")
try:
return pattern.findall(file)[0]
except IndexError:
log.error(
"templates:get_version_from_workfile:"
"`{}` missing version string."
"Example `v004`".format(file)
)
def get_avalon_database():
@ -474,31 +423,20 @@ def get_avalon_database():
def set_io_database():
project = os.environ.get('AVALON_PROJECT', '')
asset = os.environ.get('AVALON_ASSET', '')
silo = os.environ.get('AVALON_SILO', '')
os.environ['AVALON_PROJECT'] = project
os.environ['AVALON_ASSET'] = asset
os.environ['AVALON_SILO'] = silo
required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
for key in required_keys:
os.environ[key] = os.environ.get(key, "")
io.install()
def get_all_avalon_projects():
db = get_avalon_database()
project_names = db.collection_names()
projects = []
for name in project_names:
for name in db.collection_names():
projects.append(db[name].find_one({'type': 'project'}))
return projects
def get_presets_path():
templates = os.environ['PYPE_CONFIG']
path_items = [templates, 'presets']
filepath = os.path.sep.join(path_items)
return filepath
def filter_pyblish_plugins(plugins):
"""
This servers as plugin filter / modifier for pyblish. It will load plugin

View file

@ -280,8 +280,8 @@ def collect_animation_data():
# build attributes
data = OrderedDict()
data["startFrame"] = start
data["endFrame"] = end
data["frameStart"] = start
data["frameEnd"] = end
data["handles"] = 0
data["step"] = 1.0
data["fps"] = fps
@ -1858,16 +1858,16 @@ def set_context_settings():
# Todo (Wijnand): apply renderer and resolution of project
project_data = lib.get_project_data()
asset_data = lib.get_asset_data()
project_data = lib.get_project()["data"]
asset_data = lib.get_asset()["data"]
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
set_scene_fps(fps)
# Set project resolution
width_key = "resolution_width"
height_key = "resolution_height"
width_key = "resolutionWidth"
height_key = "resolutionHeight"
width = asset_data.get(width_key, project_data.get(width_key, 1920))
height = asset_data.get(height_key, project_data.get(height_key, 1080))
@ -1887,7 +1887,7 @@ def validate_fps():
"""
fps = lib.get_asset_fps()
fps = lib.get_asset()["data"]["fps"]
current_fps = mel.eval('currentTimeUnitToFPS()') # returns float
if current_fps != fps:

View file

@ -45,7 +45,7 @@ def checkInventoryVersions():
if container:
node = container["_node"]
avalon_knob_data = get_avalon_knob_data(node)
avalon_knob_data = avalon.nuke.get_avalon_knob_data(node)
# get representation from io
representation = io.find_one({
@ -88,7 +88,7 @@ def writes_version_sync():
for each in nuke.allNodes():
if each.Class() == 'Write':
avalon_knob_data = get_avalon_knob_data(each)
avalon_knob_data = avalon.nuke.get_avalon_knob_data(each)
try:
if avalon_knob_data['families'] not in ["render"]:
@ -119,7 +119,7 @@ def version_up_script():
def get_render_path(node):
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
@ -153,15 +153,15 @@ def format_anatomy(data):
if not version:
file = script_name()
data["version"] = pype.get_version_from_path(file)
project_document = pype.get_project()
data.update({
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": str(pype.get_task()).lower(),
"task": api.Session["AVALON_TASK"].lower(),
"family": data["avalon"]["family"],
"project": {"name": pype.get_project_name(),
"code": pype.get_project_code()},
"project": {"name": project_document["name"],
"code": project_document["data"].get("code", '')},
"representation": data["nuke_dataflow_writes"]["file_type"],
"app": data["application"]["application_dir"],
"hierarchy": pype.get_hierarchy(),
@ -321,16 +321,7 @@ def create_write_node(name, data, prenodes=None):
lnk.makeLink(write_node.name(), "Render")
lnk.setName("Render")
GN.addKnob(lnk)
# linking knobs to group property panel
linking_knobs = ["first", "last", "use_limit"]
for k in linking_knobs:
lnk = nuke.Link_Knob(k)
lnk.makeLink(write_node.name(), k)
lnk.setName(k.replace('_', ' ').capitalize())
lnk.clearFlag(nuke.STARTLINE)
GN.addKnob(lnk)
return GN
@ -449,17 +440,17 @@ def reset_frame_range_handles():
root = nuke.root()
name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": name, "type": "asset"})
asset_entity = pype.get_asset(name)
if "data" not in asset:
if "data" not in asset_entity:
msg = "Asset {} don't have set any 'data'".format(name)
log.warning(msg)
nuke.message(msg)
return
data = asset["data"]
data = asset_entity["data"]
missing_cols = []
check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"]
check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"]
for col in check_cols:
if col not in data:
@ -473,30 +464,27 @@ def reset_frame_range_handles():
return
# get handles values
handles = avalon.nuke.get_handles(asset)
handle_start, handle_end = pype.get_handle_irregular(asset)
handle_start = asset_entity["data"]["handleStart"]
handle_end = asset_entity["data"]["handleEnd"]
fps = asset["data"]["fps"]
edit_in = int(asset["data"]["fstart"]) - handle_start
edit_out = int(asset["data"]["fend"]) + handle_end
fps = asset_entity["data"]["fps"]
frame_start = int(asset_entity["data"]["frameStart"]) - handle_start
frame_end = int(asset_entity["data"]["frameEnd"]) + handle_end
root["fps"].setValue(fps)
root["first_frame"].setValue(edit_in)
root["last_frame"].setValue(edit_out)
root["first_frame"].setValue(frame_start)
root["last_frame"].setValue(frame_end)
log.info("__ handles: `{}`".format(handles))
log.info("__ handle_start: `{}`".format(handle_start))
log.info("__ handle_end: `{}`".format(handle_end))
log.info("__ edit_in: `{}`".format(edit_in))
log.info("__ edit_out: `{}`".format(edit_out))
log.info("__ fps: `{}`".format(fps))
# setting active viewers
nuke.frame(int(asset["data"]["fstart"]))
nuke.frame(int(asset_entity["data"]["frameStart"]))
range = '{0}-{1}'.format(
int(asset["data"]["fstart"]),
int(asset["data"]["fend"]))
int(asset_entity["data"]["frameStart"]),
int(asset_entity["data"]["frameEnd"]))
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
@ -510,21 +498,12 @@ def reset_frame_range_handles():
# adding handle_start/end to root avalon knob
if not avalon.nuke.set_avalon_knob_data(root, {
"handle_start": int(handle_start),
"handle_end": int(handle_end)
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def get_avalon_knob_data(node):
import toml
try:
data = toml.loads(node['avalon'].value())
except Exception:
return None
return data
def reset_resolution():
"""Set resolution to project resolution."""
log.info("Reseting resolution")
@ -532,9 +511,9 @@ def reset_resolution():
asset = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset, "type": "asset"})
width = asset.get('data', {}).get('resolution_width')
height = asset.get('data', {}).get('resolution_height')
pixel_aspect = asset.get('data', {}).get('pixel_aspect')
width = asset.get('data', {}).get("resolutionWidth")
height = asset.get('data', {}).get("resolutionHeight")
pixel_aspect = asset.get('data', {}).get("pixelAspect")
log.info("pixel_aspect: {}".format(pixel_aspect))
if any(not x for x in [width, height, pixel_aspect]):
@ -575,7 +554,7 @@ def reset_resolution():
crnt_fmt_kargs = {
"width": (check_format.width()),
"height": (check_format.height()),
"pixel_aspect": float(check_format.pixelAspect())
"pixelAspect": float(check_format.pixelAspect())
}
if bbox:
crnt_fmt_kargs.update({
@ -590,7 +569,7 @@ def reset_resolution():
new_fmt_kargs = {
"width": int(width),
"height": int(height),
"pixel_aspect": float(pixel_aspect),
"pixelAspect": float(pixel_aspect),
"project_name": format_name
}
if bbox:
@ -620,13 +599,13 @@ def make_format_string(**args):
"{y} "
"{r} "
"{t} "
"{pixel_aspect:.2f}".format(**args)
"{pixelAspect:.2f}".format(**args)
)
else:
return (
"{width} "
"{height} "
"{pixel_aspect:.2f}".format(**args)
"{pixelAspect:.2f}".format(**args)
)
@ -668,60 +647,6 @@ def get_hierarchical_attr(entity, attr, default=None):
return get_hierarchical_attr(parent, attr)
# TODO: bellow functions are wip and needs to be check where they are used
# ------------------------------------
#
# def update_frame_range(start, end, root=None):
# """Set Nuke script start and end frame range
#
# Args:
# start (float, int): start frame
# end (float, int): end frame
# root (object, Optional): root object from nuke's script
#
# Returns:
# None
#
# """
#
# knobs = {
# "first_frame": start,
# "last_frame": end
# }
#
# with avalon.nuke.viewer_update_and_undo_stop():
# for key, value in knobs.items():
# if root:
# root[key].setValue(value)
# else:
# nuke.root()[key].setValue(value)
#
# #
# def get_additional_data(container):
# """Get Nuke's related data for the container
#
# Args:
# container(dict): the container found by the ls() function
#
# Returns:
# dict
# """
#
# node = container["_node"]
# tile_color = node['tile_color'].value()
# if tile_color is None:
# return {}
#
# hex = '%08x' % tile_color
# rgba = [
# float(int(hex[0:2], 16)) / 255.0,
# float(int(hex[2:4], 16)) / 255.0,
# float(int(hex[4:6], 16)) / 255.0
# ]
#
# return {"color": Qt.QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
def get_write_node_template_attr(node):
''' Gets all defined data from presets
@ -729,7 +654,7 @@ def get_write_node_template_attr(node):
'''
# get avalon data from node
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
"preset": data['avalon']['families']
@ -747,7 +672,7 @@ def get_write_node_template_attr(node):
# adding dataflow template
{correct_data.update({k: v})
for k, v in nuke_dataflow_writes.items()
if k not in ["id", "previous"]}
if k not in ["_id", "_previous"]}
# adding colorspace template
{correct_data.update({k: v})

View file

@ -46,14 +46,14 @@ def set_workfiles():
project.setProjectRoot(active_project_root)
# get project data from avalon db
project_data = pype.get_project_data()
project_data = pype.get_project()["data"]
log.info("project_data: {}".format(project_data))
# get format and fps property from avalon db on project
width = project_data['resolution_width']
height = project_data['resolution_height']
pixel_aspect = project_data['pixel_aspect']
width = project_data["resolutionWidth"]
height = project_data["resolutionHeight"]
pixel_aspect = project_data["pixelAspect"]
fps = project_data['fps']
format_name = project_data['code']

View file

@ -14,9 +14,9 @@ def create_nk_script_clips(script_lst, seq=None):
'handles': 10,
'handleStart': 15, # added asymetrically to handles
'handleEnd': 10, # added asymetrically to handles
'timelineIn': 16,
'startFrame': 991,
'endFrame': 1023,
"clipIn": 16,
"frameStart": 991,
"frameEnd": 1023,
'task': 'Comp-tracking',
'work_dir': 'VFX_PR',
'shot': '00010'
@ -55,12 +55,12 @@ def create_nk_script_clips(script_lst, seq=None):
if media_in:
source_in = media_in + handle_start
else:
source_in = nk['startFrame'] + handle_start
source_in = nk["frameStart"] + handle_start
if media_duration:
source_out = (media_in + media_duration - 1) - handle_end
else:
source_out = nk['endFrame'] - handle_end
source_out = nk["frameEnd"] - handle_end
print("__ media: `{}`".format(media))
print("__ media_in: `{}`".format(media_in))
@ -98,8 +98,8 @@ def create_nk_script_clips(script_lst, seq=None):
trackItem.setSourceIn(source_in)
trackItem.setSourceOut(source_out)
trackItem.setSourceIn(source_in)
trackItem.setTimelineIn(nk['timelineIn'])
trackItem.setTimelineOut(nk['timelineIn'] + (source_out - source_in))
trackItem.setTimelineIn(nk["clipIn"])
trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
track.addTrackItem(trackItem)
track.addTrackItem(trackItem)
clips_lst.append(trackItem)
@ -179,9 +179,9 @@ script_lst = [{
'handles': 10,
'handleStart': 10,
'handleEnd': 10,
'timelineIn': 16,
'startFrame': 991,
'endFrame': 1023,
"clipIn": 16,
"frameStart": 991,
"frameEnd": 1023,
'task': 'platesMain',
'work_dir': 'shots',
'shot': '120sh020'

View file

@ -87,13 +87,13 @@ class CollectContextDataFromAport(pyblish.api.ContextPlugin):
context.data["currentFile"] = current_file
# get project data from avalon
project_data = pype.get_project_data()
project_data = pype.get_project()["data"]
assert project_data, "No `project_data` data in avalon db"
context.data["projectData"] = project_data
self.log.debug("project_data: {}".format(project_data))
# get asset data from avalon and fix all paths
asset_data = pype.get_asset_data()
asset_data = pype.get_asset()["data"]
assert asset_data, "No `asset_data` data in avalon db"
asset_data = {k: v.replace("\\", "/") for k, v in asset_data.items()
if isinstance(v, str)}

View file

@ -39,19 +39,18 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
assert instances_data, "No `asset_default` data in json file"
asset_name = a_session["AVALON_ASSET"]
entity = io.find_one({"name": asset_name,
"type": "asset"})
entity = pype.get_asset(asset_name)
# get frame start > first try from asset data
frame_start = context.data["assetData"].get("fstart", None)
frame_start = context.data["assetData"].get("frameStart", None)
if not frame_start:
self.log.debug("frame_start not on assetData")
# get frame start > second try from parent data
frame_start = pype.get_data_hierarchical_attr(entity, "fstart")
frame_start = entity["data"]["frameStart"]
if not frame_start:
self.log.debug("frame_start not on any parent entity")
# get frame start > third try from parent data
frame_start = asset_default["fstart"]
frame_start = asset_default["frameStart"]
assert frame_start, "No `frame_start` data found, "
"please set `fstart` on asset"
@ -61,7 +60,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
handles = context.data["assetData"].get("handles", None)
if not handles:
# get frame start > second try from parent data
handles = pype.get_data_hierarchical_attr(entity, "handles")
handles = entity["data"]["handles"]
if not handles:
# get frame start > third try from parent data
handles = asset_default["handles"]
@ -129,7 +128,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
instance.data.update({
"subset": subset_name,
"task": task,
"fstart": frame_start,
"frameStart": frame_start,
"handles": handles,
"host": host,
"asset": asset,

View file

@ -76,11 +76,11 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
else:
end_frame += (
instance.data['endFrame'] - instance.data['startFrame']
instance.data["frameEnd"] - instance.data["frameStart"]
)
if not comp.get('frameRate'):
comp['frameRate'] = instance.context.data['fps']
if not comp.get('fps'):
comp['fps'] = instance.context.data['fps']
location = self.get_ftrack_location(
'ftrack.server', ft_session
)
@ -90,7 +90,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"metadata": {'ftr_meta': json.dumps({
'frameIn': int(start_frame),
'frameOut': int(end_frame),
'frameRate': float(comp['frameRate'])})}
'frameRate': float(comp['fps'])})}
}
comp['thumbnail'] = False
else:

View file

@ -27,8 +27,8 @@ class FusionSetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -60,8 +60,8 @@ class FusionSetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -145,7 +145,7 @@ class FusionLoadSequence(api.Loader):
tool["Clip"] = path
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
start = context["version"]["data"].get("frameStart", None)
if start is not None:
loader_shift(tool, start, relative=False)
@ -175,7 +175,7 @@ class FusionLoadSequence(api.Loader):
been set.
- GlobalIn: Fusion reset to comp's global in if duration changes
- We change it to the "startFrame"
- We change it to the "frameStart"
- GlobalEnd: Fusion resets to globalIn + length if duration changes
- We do the same like Fusion - allow fusion to take control.
@ -212,7 +212,7 @@ class FusionLoadSequence(api.Loader):
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
start = version["data"].get("startFrame")
start = version["data"].get("frameStart")
if start is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "

View file

@ -23,7 +23,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
current context's data as "startFrame" and "endFrame".
current context's data as "frameStart" and "frameEnd".
"""
@ -43,8 +43,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
savers = [tool for tool in tools if tool.ID == "Saver"]
start, end = get_comp_render_range(comp)
context.data["startFrame"] = start
context.data["endFrame"] = end
context.data["frameStart"] = start
context.data["frameEnd"] = end
for tool in savers:
path = tool["Clip"][comp.TIME_UNDEFINED]

View file

@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}

View file

@ -79,8 +79,8 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Fusion",
"Frames": "{start}-{end}".format(
start=int(context.data["startFrame"]),
end=int(context.data["endFrame"])
start=int(context.data["frameStart"]),
end=int(context.data["frameEnd"])
),
"Comment": comment,

View file

@ -1,22 +1,15 @@
import os
import subprocess
import json
from pype import lib as pypelib
from pypeapp import config
from avalon import api
def get_config_data():
path_items = [pypelib.get_presets_path(), 'djv_view', 'config.json']
filepath = os.path.sep.join(path_items)
data = dict()
with open(filepath) as data_file:
data = json.load(data_file)
return data
def get_families():
families = []
paths = get_config_data().get('djv_paths', [])
paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
"djv_paths", []
)
for path in paths:
if os.path.exists(path):
families.append("*")
@ -25,13 +18,15 @@ def get_families():
def get_representation():
return get_config_data().get('file_ext', [])
return config.get_presets().get("djv_view", {}).get("config", {}).get(
'file_ext', []
)
class OpenInDJV(api.Loader):
"""Open Image Sequence with system default"""
config_data = get_config_data()
config_data = config.get_presets().get("djv_view", {}).get("config", {})
families = get_families()
representations = get_representation()
@ -42,7 +37,9 @@ class OpenInDJV(api.Loader):
def load(self, context, name, namespace, data):
self.djv_path = None
paths = get_config_data().get('djv_paths', [])
paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
"djv_paths", []
)
for path in paths:
if os.path.exists(path):
self.djv_path = path

View file

@ -67,9 +67,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
if isinstance(component['files'], list):
collections, remainder = clique.assemble(component['files'])
self.log.debug("collecting sequence: {}".format(collections))
instance.data['startFrame'] = int(component['startFrame'])
instance.data['endFrame'] = int(component['endFrame'])
instance.data['frameRate'] = int(component['frameRate'])
instance.data["frameStart"] = int(component["frameStart"])
instance.data["frameEnd"] = int(component["frameEnd"])
instance.data['fps'] = int(component['fps'])
instance.data["representations"].append(component)

View file

@ -6,14 +6,13 @@ from pprint import pformat
import pyblish.api
from avalon import api
import pype.api as pype
def collect(root,
regex=None,
exclude_regex=None,
startFrame=None,
endFrame=None):
frame_start=None,
frame_end=None):
"""Collect sequence collections in root"""
from avalon.vendor import clique
@ -52,10 +51,10 @@ def collect(root,
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
if startFrame is not None and index < startFrame:
if frame_start is not None and index < frame_start:
collection.indexes.discard(index)
continue
if endFrame is not None and index > endFrame:
if frame_end is not None and index > frame_end:
collection.indexes.discard(index)
continue
@ -77,8 +76,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
api.Session["AVALON_ASSET"]
subset (str): The subset to publish to. If not provided the sequence's
head (up to frame number) will be used.
startFrame (int): The start frame for the sequence
endFrame (int): The end frame for the sequence
frame_start (int): The start frame for the sequence
frame_end (int): The end frame for the sequence
root (str): The path to collect from (can be relative to the .json)
regex (str): A regex for the sequence filename
exclude_regex (str): A regex for filename to exclude from collection
@ -143,8 +142,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
collections = collect(root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
startFrame=data.get("startFrame"),
endFrame=data.get("endFrame"))
frame_start=data.get("frameStart"),
frame_end=data.get("frameEnd"))
self.log.info("Found collections: {}".format(collections))
@ -179,8 +178,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("startFrame", indices[0])
end = data.get("endFrame", indices[-1])
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
# root = os.path.normpath(root)
# self.log.info("Source: {}}".format(data.get("source", "")))
@ -194,8 +193,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"startFrame": start,
"endFrame": end,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get('source', '')
})
@ -211,7 +210,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
'files': list(collection),
"stagingDir": root,
"anatomy_template": "render",
"frameRate": fps,
"fps": fps,
"tags": ['review']
}
instance.data["representations"].append(representation)

View file

@ -1,7 +1,7 @@
import os
import json
import pyblish.api
from pype import lib as pypelib
from pypeapp import config
class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
@ -12,13 +12,5 @@ class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
hosts = ["shell"]
def process(self, context):
config_items = [
pypelib.get_presets_path(),
"ftrack",
"output_representation.json"
]
config_file = os.path.sep.join(config_items)
with open(config_file) as data_file:
config_data = json.load(data_file)
config_data = config.get_presets()["ftrack"]["output_representation"]
context.data['output_repre_config'] = config_data

View file

@ -12,6 +12,6 @@ class CollectProjectData(pyblish.api.ContextPlugin):
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project_data()
context.data["projectData"] = pype.get_project()["data"]
return

View file

@ -33,7 +33,7 @@ class ExtractBurnin(pype.api.Extractor):
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
"start_frame": int(instance.data['startFrame']),
"start_frame": int(instance.data["frameStart"]),
"version": version
}
self.log.debug("__ prep_data: {}".format(prep_data))

View file

@ -22,7 +22,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
families = ["imagesequence", "render", "write", "source"]
def process(self, instance):
start = instance.data.get("startFrame")
start = instance.data.get("frameStart")
stagingdir = os.path.normpath(instance.data.get("stagingDir"))
collected_frames = os.listdir(stagingdir)

View file

@ -30,7 +30,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("startFrame")
start_frame = inst_data.get("frameStart")
self.log.debug("Families In: `{}`".format(instance.data["families"]))
@ -87,7 +87,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
repre_new = repre.copy()
new_tags = tags[:]
new_tags = [x for x in tags if x != "delete"]
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# add families
@ -187,16 +187,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
# if "delete" in tags:
# if "mov" in full_input_path:
# os.remove(full_input_path)
# self.log.debug("Removed: `{}`".format(full_input_path))
else:
continue
else:
continue
for repre in representations_new:
if "delete" in repre.get("tags", []):
representations_new.remove(repre)
self.log.debug(
"new representations: {}".format(representations_new))
instance.data["representations"] = representations_new

View file

@ -404,7 +404,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles", "sourceHashes"
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:

View file

@ -36,9 +36,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template from anatomy that should be used for
integrating this file. Only the first level can
be specified right now.
'startFrame'
'endFrame'
'framerate'
"frameStart"
"frameEnd"
'fps'
"""
label = "Integrate Asset New"
@ -303,10 +303,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_tail = dst_collection.format("{tail}")
index_frame_start = None
if repre.get('startFrame'):
if repre.get("frameStart"):
frame_start_padding = len(str(
repre.get('endFrame')))
index_frame_start = repre.get('startFrame')
repre.get("frameEnd")))
index_frame_start = repre.get("frameStart")
dst_padding_exp = src_padding_exp
for i in src_collection.indexes:
@ -410,6 +410,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
if os.path.normpath(src) != os.path.normpath(dest):
self.copy_file(src, dest)
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
@ -544,8 +553,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles",
"handle_end", "handle_start", "sourceHashes"
"frameStart", "frameEnd", "step", "handles",
"handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
if key in instance.data:

View file

@ -408,7 +408,7 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["startFrame", "endFrame", "step",
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:

View file

@ -121,7 +121,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
- publishJobState (str, Optional): "Active" or "Suspended"
This defaults to "Suspended"
This requires a "startFrame" and "endFrame" to be present in instance.data
This requires a "frameStart" and "frameEnd" to be present in instance.data
or in context.data.
"""
@ -259,12 +259,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Get start/end frame from instance, if not available get from context
context = instance.context
start = instance.data.get("startFrame")
start = instance.data.get("frameStart")
if start is None:
start = context.data["startFrame"]
end = instance.data.get("endFrame")
start = context.data["frameStart"]
end = instance.data.get("frameEnd")
if end is None:
end = context.data["endFrame"]
end = context.data["frameEnd"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
@ -289,8 +289,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata = {
"asset": asset,
"regex": regex,
"startFrame": start,
"endFrame": end,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
"families": ["render"],
"source": source,
@ -338,8 +338,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["startFrame"]
prev_end = version["data"]["endFrame"]
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
@ -375,12 +375,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Please do so when fixing this.
# Start frame
metadata["startFrame"] = updated_start
metadata["metadata"]["instance"]["startFrame"] = updated_start
metadata["frameStart"] = updated_start
metadata["metadata"]["instance"]["frameStart"] = updated_start
# End frame
metadata["endFrame"] = updated_end
metadata["metadata"]["instance"]["endFrame"] = updated_end
metadata["frameEnd"] = updated_end
metadata["metadata"]["instance"]["frameEnd"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)

View file

@ -0,0 +1,12 @@
import pyblish.api
class ValidateFileSequences(pyblish.api.ContextPlugin):
"""Validates whether any file sequences were collected."""
order = pyblish.api.ValidatorOrder
targets = ["filesequence"]
label = "Validate File Sequences"
def process(self, context):
assert context, "Nothing collected."

View file

@ -22,8 +22,8 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
frames = list(collection.indexes)
current_range = (frames[0], frames[-1])
required_range = (instance.data["startFrame"],
instance.data["endFrame"])
required_range = (instance.data["frameStart"],
instance.data["frameEnd"])
if current_range != required_range:
raise ValueError("Invalid frame range: {0} - "

View file

@ -23,8 +23,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
match = re.match("(\w+)\.(\d+)\.vdb", file_name)
result = file_name
start_frame = instance.data.get("startFrame", None)
end_frame = instance.data.get("endFrame", None)
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
if match and start_frame is not None:

View file

@ -55,7 +55,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
if "startFrame" in data and "endFrame" in data:
if "frameStart" in data and "frameEnd" in data:
frames = "[{startFrame} - {endFrame}]".format(**data)
label = "{} {}".format(label, frames)
@ -91,8 +91,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
if node.evalParm("trange") == 0:
return data
data["startFrame"] = node.evalParm("f1")
data["endFrame"] = node.evalParm("f2")
data["frameStart"] = node.evalParm("f1")
data["frameEnd"] = node.evalParm("f2")
data["steps"] = node.evalParm("f3")
return data

View file

@ -6,10 +6,9 @@ import acre
from avalon import api, lib
import pype.api as pype
from pype.aport import lib as aportlib
from pype.api import Logger
log = Logger().get_logger(__name__, "aport")
log = pype.Logger().get_logger(__name__, "aport")
class Aport(api.Action):
@ -50,15 +49,16 @@ class Aport(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()
env.update(dict(os.environ))
try:
lib.launch(executable=executable,
args=arguments,
environment=env
)
lib.launch(
executable=executable,
args=arguments,
environment=env
)
except Exception as e:
log.error(e)
return

View file

@ -3,7 +3,7 @@ import sys
from pprint import pprint
import acre
from avalon import api, lib
from avalon import api, lib, io
import pype.api as pype
@ -44,12 +44,42 @@ class PremierePro(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
project_name = env.get("AVALON_PROJECT")
anatomy = Anatomy(project_name)
os.environ['AVALON_PROJECT'] = project_name
io.Session['AVALON_PROJECT'] = project_name
task_name = os.environ.get(
"AVALON_TASK", io.Session["AVALON_TASK"]
)
asset_name = os.environ.get(
"AVALON_ASSET", io.Session["AVALON_ASSET"]
)
application = lib.get_application(
os.environ["AVALON_APP_NAME"]
)
project_doc = io.find_one({"type": "project"})
data = {
"task": task_name,
"asset": asset_name,
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code", '')
},
"hierarchy": pype.get_hierarchy(),
"app": application["application_dir"]
}
anatomy_filled = anatomy.format(data)
workdir = anatomy_filled["work"]["folder"]
os.environ["AVALON_WORKDIR"] = workdir
env.update(dict(os.environ))
lib.launch(executable=executable,
args=arguments,
environment=env
)
lib.launch(
executable=executable,
args=arguments,
environment=env
)
return

View file

@ -13,8 +13,8 @@ class CreateVrayProxy(avalon.maya.Creator):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
self.data["animation"] = False
self.data["startFrame"] = 1
self.data["endFrame"] = 1
self.data["frameStart"] = 1
self.data["frameEnd"] = 1
# Write vertex colors
self.data["vertexColors"] = False

View file

@ -25,8 +25,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -59,8 +59,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -106,9 +106,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["asset"])
# Append start frame and end frame to label if present
if "startFrame" and "endFrame" in data:
label += " [{0}-{1}]".format(int(data["startFrame"]),
int(data["endFrame"]))
if "frameStart" and "frameEnd" in data:
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance.data["label"] = label

View file

@ -15,8 +15,8 @@ class CollectMayaAscii(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame
# make ftrack publishable
if instance.data.get('families'):

View file

@ -22,5 +22,5 @@ class CollectModelData(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame

View file

@ -64,9 +64,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
"subset": layername,
"setMembers": layer,
"publish": True,
"startFrame": self.get_render_attribute("startFrame",
"frameStart": self.get_render_attribute("frameStart",
layer=layer),
"endFrame": self.get_render_attribute("endFrame",
"frameEnd": self.get_render_attribute("frameEnd",
layer=layer),
"byFrameStep": self.get_render_attribute("byFrameStep",
layer=layer),
@ -106,8 +106,8 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
# Define nice label
label = "{0} ({1})".format(layername, data["asset"])
label += " [{0}-{1}]".format(int(data["startFrame"]),
int(data["endFrame"]))
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance = context.create_instance(layername)
instance.data["label"] = label

View file

@ -54,10 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('adding review family to {}'.format(reviewable_subset))
data['review_camera'] = camera
# data["publish"] = False
data['startFrameReview'] = instance.data['startFrame']
data['endFrameReview'] = instance.data['endFrame']
data['startFrame'] = instance.data['startFrame']
data['endFrame'] = instance.data['endFrame']
data['startFrameReview'] = instance.data["frameStart"]
data['endFrameReview'] = instance.data["frameEnd"]
data["frameStart"] = instance.data["frameStart"]
data["frameEnd"] = instance.data["frameEnd"]
data['handles'] = instance.data['handles']
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
@ -69,8 +69,8 @@ class CollectReview(pyblish.api.InstancePlugin):
else:
instance.data['subset'] = task + 'Review'
instance.data['review_camera'] = camera
instance.data['startFrameReview'] = instance.data['startFrame']
instance.data['endFrameReview'] = instance.data['endFrame']
instance.data['startFrameReview'] = instance.data["frameStart"]
instance.data['endFrameReview'] = instance.data["frameEnd"]
# make ftrack publishable
instance.data["families"] = ['ftrack']

View file

@ -82,8 +82,8 @@ class CollectVRayScene(pyblish.api.ContextPlugin):
"subset": subset,
"setMembers": layer,
"startFrame": start_frame,
"endFrame": end_frame,
"frameStart": start_frame,
"frameEnd": end_frame,
"renderer": "vray",
"resolution": resolution,
"ext": ".{}".format(extension),

View file

@ -45,8 +45,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["startFrame"] = 1
instance.data["endFrame"] = 1
instance.data["frameStart"] = 1
instance.data["frameEnd"] = 1
def collect_input_connections(self, instance):
"""Collect the inputs for all nodes in the input_SET"""

View file

@ -35,8 +35,8 @@ class ExtractAnimation(pype.api.Extractor):
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -23,8 +23,8 @@ class ExtractCameraAlembic(pype.api.Extractor):
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)

View file

@ -88,8 +88,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)

View file

@ -166,8 +166,8 @@ class ExtractFBX(pype.api.Extractor):
self.log.info("Export options: {0}".format(options))
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -25,8 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
nodes = instance[:]
# Collect the start and end including handles
start = instance.data.get("startFrame", 1)
end = instance.data.get("endFrame", 1)
start = instance.data.get("frameStart", 1)
end = instance.data.get("frameEnd", 1)
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -114,9 +114,9 @@ class ExtractQuicktime(pype.api.Extractor):
'ext': 'mov',
'files': collected_frames,
"stagingDir": stagingdir,
'startFrame': start,
'endFrame': end,
'frameRate': fps,
"frameStart": start,
"frameEnd": end,
'fps': fps,
'preview': True,
'tags': ['review', 'delete']
}

View file

@ -28,14 +28,14 @@ class ExtractVRayProxy(pype.api.Extractor):
if not anim_on:
# Remove animation information because it is not required for
# non-animated subsets
instance.data.pop("startFrame", None)
instance.data.pop("endFrame", None)
instance.data.pop("frameStart", None)
instance.data.pop("frameEnd", None)
start_frame = 1
end_frame = 1
else:
start_frame = instance.data["startFrame"]
end_frame = instance.data["endFrame"]
start_frame = instance.data["frameStart"]
end_frame = instance.data["frameEnd"]
vertex_colors = instance.data.get("vertexColors", False)

View file

@ -31,8 +31,8 @@ class ExtractYetiCache(pype.api.Extractor):
data_file = os.path.join(dirname, "yeti.fursettings")
# Collect information for writing cache
start_frame = instance.data.get("startFrame")
end_frame = instance.data.get("endFrame")
start_frame = instance.data.get("frameStart")
end_frame = instance.data.get("frameEnd")
preroll = instance.data.get("preroll")
if preroll > 0:
start_frame -= preroll

View file

@ -182,8 +182,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"),
"Frames": "{start}-{end}x{step}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"]),
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
),
@ -330,7 +330,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame", "byFrameStep"):
for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:

View file

@ -389,8 +389,8 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
},
"frames_range": {
"value": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])),
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])),
"state": True,
"subst": False
},
@ -539,7 +539,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame", "byFrameStep"):
for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:

View file

@ -51,8 +51,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
filename,
vrscene_output)
start_frame = int(instance.data["startFrame"])
end_frame = int(instance.data["endFrame"])
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
# Primary job
self.log.info("Submitting export job ..")
@ -123,8 +123,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
self.log.info("Submitting render job ..")
start_frame = int(instance.data["startFrame"])
end_frame = int(instance.data["endFrame"])
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
ext = instance.data.get("ext", "exr")
# Create output directory for renders
@ -215,8 +215,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
return cmd.format(project=instance.context.data["workspaceDir"],
cam=cammera,
startFrame=instance.data["startFrame"],
endFrame=instance.data["endFrame"],
startFrame=instance.data["frameStart"],
endFrame=instance.data["frameEnd"],
layer=instance.name)
def build_jobinfo_environment(self, env):
@ -266,7 +266,7 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
if dir:
return output_path.replace("\\", "/")
start_frame = int(instance.data["startFrame"])
start_frame = int(instance.data["frameStart"])
filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)
result = filename_zero.replace("\\", "/")

View file

@ -25,8 +25,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
def process(self, instance):
start = instance.data.get("startFrame", None)
end = instance.data.get("endFrame", None)
start = instance.data.get("frameStart", None)
end = instance.data.get("frameEnd", None)
handles = instance.data.get("handles", None)
# Check if any of the values are present

View file

@ -51,8 +51,8 @@ class ValidateInstancerFrameRanges(pyblish.api.InstancePlugin):
import pyseq
start_frame = instance.data.get("startFrame", 0)
end_frame = instance.data.get("endFrame", 0)
start_frame = instance.data.get("frameStart", 0)
end_frame = instance.data.get("frameEnd", 0)
required = range(int(start_frame), int(end_frame) + 1)
invalid = list()

View file

@ -21,7 +21,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
angularunits = context.data('angularUnits')
fps = context.data['fps']
asset_fps = lib.get_asset_fps()
asset_fps = lib.get_asset()["data"]["fps"]
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))
@ -50,5 +50,5 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
cls.log.debug(current_linear)
cls.log.info("Setting time unit to match project")
asset_fps = lib.get_asset_fps()
asset_fps = lib.get_asset()["data"]["fps"]
mayalib.set_scene_fps(asset_fps)

View file

@ -23,5 +23,5 @@ class ValidateVrayProxy(pyblish.api.InstancePlugin):
cls.log.error("'%s' is empty! This is a bug" % instance.name)
if data["animation"]:
if data["endFrame"] < data["startFrame"]:
if data["frameEnd"] < data["frameStart"]:
cls.log.error("End frame is smaller than start frame")

View file

@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}

View file

@ -78,8 +78,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
"Comment": comment,

View file

@ -19,10 +19,10 @@ class ValidateSettingsNuke(pyblish.api.Validator):
asset = io.find_one({"name": os.environ['AVALON_ASSET']})
try:
avalon_resolution = asset["data"].get("resolution", '')
avalon_pixel_aspect = asset["data"].get("pixel_aspect", '')
avalon_pixel_aspect = asset["data"].get("pixelAspect", '')
avalon_fps = asset["data"].get("fps", '')
avalon_first = asset["data"].get("edit_in", '')
avalon_last = asset["data"].get("edit_out", '')
avalon_first = asset["data"].get("frameStart", '')
avalon_last = asset["data"].get("frameEnd", '')
avalon_crop = asset["data"].get("crop", '')
except KeyError:
print(

View file

@ -125,6 +125,24 @@ class CreateWritePrerender(avalon.nuke.Creator):
write_data.update({
"fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
create_write_node(self.data["subset"], write_data)
# get group node
group_node = create_write_node(self.data["subset"], write_data)
# open group node
group_node.begin()
for n in nuke.allNodes():
# get write node
if n.Class() in "Write":
write_node = n
group_node.end()
# linking knobs to group property panel
linking_knobs = ["first", "last", "use_limit"]
for k in linking_knobs:
lnk = nuke.Link_Knob(k)
lnk.makeLink(write_node.name(), k)
lnk.setName(k.replace('_', ' ').capitalize())
lnk.clearFlag(nuke.STARTLINE)
group_node.addKnob(lnk)
return

View file

@ -30,8 +30,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
log.info("start: {}, end: {}".format(start, end))
if start is None or end is None:
@ -64,8 +64,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -76,7 +76,7 @@ class LoadMov(api.Loader):
"""Load mov file into Nuke"""
families = ["write", "source", "plate", "render", "review"]
representations = ["mov", "preview", "review", "mp4"]
representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"]
label = "Load mov"
order = -10
@ -92,8 +92,8 @@ class LoadMov(api.Loader):
version = context['version']
version_data = version.get("data", {})
orig_first = version_data.get("startFrame", None)
orig_last = version_data.get("endFrame", None)
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@ -141,7 +141,7 @@ class LoadMov(api.Loader):
read_node["frame"].setValue(str(offset_frame))
# add additional metadata from the version to imprint to Avalon knob
add_keys = [
"startFrame", "endFrame", "handles", "source", "author",
"frameStart", "frameEnd", "handles", "source", "author",
"fps", "version", "handleStart", "handleEnd"
]
@ -207,8 +207,8 @@ class LoadMov(api.Loader):
version_data = version.get("data", {})
orig_first = version_data.get("startFrame", None)
orig_last = version_data.get("endFrame", None)
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@ -250,8 +250,8 @@ class LoadMov(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"startFrame": version_data.get("startFrame"),
"endFrame": version_data.get("endFrame"),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"source": version_data.get("source"),
"handles": version_data.get("handles"),

View file

@ -1,22 +1,18 @@
from avalon import api, style, io
from pype.nuke.lib import get_avalon_knob_data
from avalon.nuke import get_avalon_knob_data
import nuke
import os
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
class LinkAsGroup(api.Loader):
"""Copy the published file to be pasted at the desired location"""
representations = ["nk"]
families = ["*"]
families = ["workfile"]
label = "Load Precomp"
order = 10
order = 0
icon = "file"
color = style.colors.dark
color = style.colors.alert
def load(self, context, name, namespace, data):
@ -27,8 +23,8 @@ class LinkAsGroup(api.Loader):
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
# Fallback to asset name when namespace is None
if namespace is None:
@ -41,17 +37,14 @@ class LinkAsGroup(api.Loader):
self.log.info("versionData: {}\n".format(context["version"]["data"]))
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
self.log.info("start: {}\n".format(start))
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handle_start", "handle_end", "source", "author", "fps"]
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {
"start_frame": start,
"fstart": first,
"fend": last,
"startingFrame": first,
"frameStart": first,
"frameEnd": last,
"version": vname
}
for k in add_keys:
@ -70,7 +63,6 @@ class LinkAsGroup(api.Loader):
colorspace = context["version"]["data"].get("colorspace", None)
self.log.info("colorspace: {}\n".format(colorspace))
# ['version', 'file', 'reading', 'output', 'useOutput']
P["name"].setValue("{}_{}".format(name, namespace))
@ -79,7 +71,7 @@ class LinkAsGroup(api.Loader):
with P:
# iterate trough all nodes in group node and find pype writes
writes = [n.name() for n in nuke.allNodes()
if n.Class() == "Write"
if n.Class() == "Group"
if get_avalon_knob_data(n)]
# create panel for selecting output
@ -87,7 +79,7 @@ class LinkAsGroup(api.Loader):
panel_label = "Select write node for output"
p = nuke.Panel("Select Write Node")
p.addEnumerationPulldown(
panel_label, panel_choices)
panel_label, panel_choices)
p.show()
P["output"].setValue(p.value(panel_label))
@ -119,7 +111,7 @@ class LinkAsGroup(api.Loader):
node = nuke.toNode(container['objectName'])
root = api.get_representation_path(representation).replace("\\","/")
root = api.get_representation_path(representation).replace("\\", "/")
# Get start frame from version data
version = io.find_one({
@ -138,7 +130,7 @@ class LinkAsGroup(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"endFrame": version["data"].get("endFrame"),
"frameEnd": version["data"].get("frameEnd"),
"version": version.get("name"),
"colorspace": version["data"].get("colorspace"),
"source": version["data"].get("source"),
@ -162,8 +154,7 @@ class LinkAsGroup(api.Loader):
else:
node["tile_color"].setValue(int("0xff0ff0ff", 16))
log.info("udated to version: {}".format(version.get("name")))
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop

View file

@ -92,8 +92,10 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
log.info("version_data: {}\n".format(version_data))
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
@ -103,9 +105,9 @@ class LoadSequence(api.Loader):
handle_start = handles
handle_end = handles
# create handles offset
first -= handle_start
last += handle_end
# # create handles offset
# first -= handle_start
# last += handle_end
# Fallback to asset name when namespace is None
if namespace is None:
@ -136,7 +138,7 @@ class LoadSequence(api.Loader):
r["last"].setValue(int(last))
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handles",
add_keys = ["frameStart", "frameEnd", "handles",
"source", "colorspace", "author", "fps", "version",
"handleStart", "handleEnd"]
@ -198,8 +200,8 @@ class LoadSequence(api.Loader):
version_data = version.get("data", {})
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
@ -226,16 +228,16 @@ class LoadSequence(api.Loader):
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
node["origfirst"].setValue(first)
node["first"].setValue(first)
node["origlast"].setValue(last)
node["last"].setValue(last)
node["origfirst"].setValue(int(first))
node["first"].setValue(int(first))
node["origlast"].setValue(int(last))
node["last"].setValue(int(last))
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"startFrame": version_data.get("startFrame"),
"endFrame": version_data.get("endFrame"),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),

View file

@ -12,3 +12,4 @@ class CollectActiveViewer(pyblish.api.ContextPlugin):
def process(self, context):
context.data["ViewerProcess"] = nuke.ViewerProcess.node()
context.data["ActiveViewer"] = nuke.activeViewer()

View file

@ -19,3 +19,5 @@ class CollectAssetInfo(pyblish.api.ContextPlugin):
self.log.info("asset_data: {}".format(asset_data))
context.data['handles'] = int(asset_data["data"].get("handles", 0))
context.data["handleStart"] = int(asset_data["data"].get("handleStart", 0))
context.data["handleEnd"] = int(asset_data["data"].get("handleEnd", 0))

View file

@ -14,5 +14,4 @@ class CollectFramerate(pyblish.api.ContextPlugin):
]
def process(self, context):
context.data["framerate"] = nuke.root()["fps"].getValue()
context.data["fps"] = nuke.root()["fps"].getValue()

View file

@ -3,7 +3,7 @@ import os
import nuke
import pyblish.api
from avalon import io, api
from avalon.nuke.lib import get_avalon_knob_data
from avalon.nuke import get_avalon_knob_data
@pyblish.api.log
@ -18,23 +18,26 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
# add handles into context
context.data['handles'] = context.data['handles']
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []
# creating instances per write node
for node in nuke.allNodes():
self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
for node in nuke.allNodes():
try:
if node["disable"].value():
continue
except Exception:
except Exception as E:
self.log.warning(E)
continue
# get data from avalon knob
self.log.debug("node[name]: {}".format(node['name'].value()))
avalon_knob_data = get_avalon_knob_data(node)
self.log.debug("avalon_knob_data: {}".format(avalon_knob_data))
if not avalon_knob_data:
continue

View file

@ -99,8 +99,8 @@ class CollectNukeReads(pyblish.api.ContextPlugin):
"stagingDir": source_dir,
"ext": ext,
"label": label,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"colorspace": node["colorspace"].value(),
"handles": int(asset_data["data"].get("handles", 0)),
"step": 1,

View file

@ -38,8 +38,8 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
handle_start = int(knob_data.get("handle_start", 0))
handle_end = int(knob_data.get("handle_end", 0))
handle_start = int(knob_data.get("handleStart", 0))
handle_end = int(knob_data.get("handleEnd", 0))
# Get format
format = root['format'].value()
@ -54,17 +54,17 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
"startFrame": first_frame + handle_start,
"endFrame": last_frame - handle_end,
"resolution_width": resolution_width,
"resolution_height": resolution_height,
"pixel_aspect": pixel_aspect,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
# backward compatibility
"handles": handle_start,
"handle_start": handle_start,
"handle_end": handle_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"step": 1,
"fps": root['fps'].value(),
}

View file

@ -34,7 +34,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_type = "mov"
# Get frame range
handles = instance.context.data.get('handles', 0)
handles = instance.context.data['handles']
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
@ -85,14 +87,29 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if 'render.local' in instance.data['families']:
instance.data['families'].append('ftrack')
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame,
"frameEnd": last_frame,
"version": int(version),
"colorspace": node["colorspace"].value(),
"families": [instance.data["family"]],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data.update({
"versionData": version_data,
"path": path,
"outputDir": output_dir,
"ext": ext,
"label": label,
"handles": handles,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"outputType": output_type,
"colorspace": node["colorspace"].value(),
})

View file

@ -0,0 +1,42 @@
import nuke
import pyblish.api
from avalon.nuke import maintained_selection
class CreateOutputNode(pyblish.api.ContextPlugin):
"""Adding output node for each ouput write node
So when latly user will want to Load .nk as LifeGroup or Precomp
Nuke will not complain about missing Output node
"""
label = 'Output Node Create'
order = pyblish.api.ExtractorOrder + 0.4
families = ["workfile"]
hosts = ['nuke']
def process(self, context):
# capture selection state
with maintained_selection():
# deselect all allNodes
self.log.info(context.data["ActiveViewer"])
active_viewer = context.data["ActiveViewer"]
active_input = active_viewer.activeInput()
active_node = active_viewer.node()
last_viewer_node = active_node.input(active_input)
name = last_viewer_node.name()
self.log.info("Node name: {}".format(name))
# select only instance render node
last_viewer_node['selected'].setValue(True)
output_node = nuke.createNode("Output")
# deselect all and select the original selection
output_node['selected'].setValue(False)
# save script
nuke.scriptSave()
# add node to instance node list
context.data["outputNode"] = output_node

View file

@ -1,91 +0,0 @@
import os
import json
import datetime
import time
import clique
from pprint import pformat
import pyblish.api
class ExtractJSON(pyblish.api.ContextPlugin):
""" Extract all instances to a serialized json file. """
order = pyblish.api.IntegratorOrder + 1
label = "Extract to JSON"
families = ["write"]
def process(self, context):
workspace = os.path.join(
os.path.dirname(context.data["currentFile"]), "workspace",
"instances")
if not os.path.exists(workspace):
os.makedirs(workspace)
context_data = context.data.copy()
unwrapped_instance = []
for i in context_data["instances"]:
unwrapped_instance.append(i.data)
context_data["instances"] = unwrapped_instance
timestamp = datetime.datetime.fromtimestamp(
time.time()).strftime("%Y%m%d-%H%M%S")
filename = timestamp + "_instances.json"
with open(os.path.join(workspace, filename), "w") as outfile:
outfile.write(pformat(context_data, depth=20))
def serialize(self, data):
"""
Convert all nested content to serialized objects
Args:
data (dict): nested data
Returns:
dict: nested data
"""
def encoding_obj(value):
try:
value = str(value).replace("\\", "/")
# value = getattr(value, '__dict__', str(value))
except Exception:
pass
return value
for key, value in dict(data).items():
if key in ["records", "instances", "results"]:
# escape all record objects
data[key] = None
continue
if hasattr(value, '__module__'):
# only deals with module objects
if "plugins" in value.__module__:
# only dealing with plugin objects
data[key] = str(value.__module__)
else:
if ".lib." in value.__module__:
# will allow only anatomy dict
data[key] = self.serialize(value)
else:
data[key] = None
continue
continue
if isinstance(value, dict):
# loops if dictionary
data[key] = self.serialize(value)
if isinstance(value, (list or tuple)):
# loops if list or tuple
for i, item in enumerate(value):
value[i] = self.serialize(item)
data[key] = value
data[key] = encoding_obj(value)
return data

View file

@ -27,8 +27,8 @@ class NukeRenderLocal(pype.api.Extractor):
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("startFrame", None)
last_frame = instance.data.get("endFrame", None)
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
self.log.info("Starting render")

View file

@ -67,8 +67,8 @@ class ExtractReviewData(pype.api.Extractor):
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("startFrame", None)
last_frame = instance.data.get("endFrame", None)
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
node = previous_node = nuke.createNode("Read")
@ -149,8 +149,8 @@ class ExtractReviewData(pype.api.Extractor):
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}

View file

@ -0,0 +1,22 @@
import nuke
import pyblish.api
class RemoveOutputNode(pyblish.api.ContextPlugin):
"""Removing output node for each ouput write node
"""
label = 'Output Node Remove'
order = pyblish.api.IntegratorOrder
families = ["workfile"]
hosts = ['nuke']
def process(self, context):
try:
output_node = context.data["outputNode"]
name = output_node["name"].value()
self.log.info("Removing output node: '{}'".format(name))
nuke.delete(output_node)
except Exception:
return

View file

@ -29,7 +29,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# root = nuke.root()
# node_subset_name = instance.data.get("name", None)
node = instance[0]
node = instance[1]
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
@ -77,8 +77,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
"Comment": comment,
@ -199,7 +199,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame"):
for key in ("frameStart", "frameEnd"):
value = instance.data[key]
if int(value) == value:

Some files were not shown because too many files have changed in this diff Show more