Merged in mergeTest (pull request #252)

Feature/PYPE-456_PYPE-401-attribute_and_lib_cleanup

Approved-by: Milan Kolar <milan@orbi.tools>
This commit is contained in:
Milan Kolar 2019-08-02 17:54:03 +00:00
commit f280477622
104 changed files with 1079 additions and 1955 deletions

View file

@ -18,29 +18,14 @@ from .action import (
from pypeapp import Logger
from .templates import (
get_project_name,
get_project_code,
get_hierarchy,
get_asset,
get_task,
set_avalon_workdir,
get_version_from_path,
get_workdir_template,
set_hierarchy,
set_project_code
)
from .lib import (
version_up,
get_handle_irregular,
get_project_data,
get_asset_data,
get_asset,
get_project,
get_hierarchy,
get_version_from_path,
modified_environ,
add_tool_to_environment,
get_data_hierarchical_attr,
get_avalon_project_template
add_tool_to_environment
)
# Special naming case for subprocess since its a built-in method.
@ -65,23 +50,12 @@ __all__ = [
# get contextual data
"version_up",
"get_handle_irregular",
"get_project_data",
"get_asset_data",
"get_project_name",
"get_project_code",
"get_project",
"get_hierarchy",
"get_asset",
"get_task",
"set_avalon_workdir",
"get_version_from_path",
"get_workdir_template",
"modified_environ",
"add_tool_to_environment",
"set_hierarchy",
"set_project_code",
"get_data_hierarchical_attr",
"get_avalon_project_template",
"subprocess"
"subprocess"
]

View file

@ -6,6 +6,7 @@ from pyblish import api as pyblish
from pypeapp import execute, Logger
from .. import api
from .lib import set_avalon_workdir
log = Logger().get_logger(__name__, "aport")
@ -33,7 +34,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "aport", "inventory")
def install():
api.set_avalon_workdir()
set_avalon_workdir()
log.info("Registering Aport plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)

View file

@ -80,17 +80,23 @@ def publish(json_data_path, gui):
@pico.expose()
def context(project, asset, task, app):
def context(project_name, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
os.environ["AVALON_PROJECT"] = project_name
io.Session["AVALON_PROJECT"] = project_name
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

135
pype/aport/lib.py Normal file
View file

@ -0,0 +1,135 @@
import os
import re
import sys
from avalon import io, api as avalon, lib as avalonlib
from pype import lib
from pype import api as pype
# from pypeapp.api import (Templates, Logger, format)
from pypeapp import Logger, Anatomy
log = Logger().get_logger(__name__, os.getenv("AVALON_APP", "pype-config"))
def get_asset():
    """
    Obtain the asset name from the avalon session or environment variable.

    Returns:
        string: asset name

    Raises:
        AssertionError: when neither ``io.Session`` nor ``os.environ``
            carry a non-empty ``AVALON_ASSET``
    """
    lib.set_io_database()
    asset = io.Session.get("AVALON_ASSET", None) \
        or os.getenv("AVALON_ASSET", None)
    log.info("asset: {}".format(asset))
    if not asset:
        # Log first, then raise. The original passed `log.error(...)` as the
        # assert message, which evaluates to None and loses the text; this
        # keeps the AssertionError type callers may catch but carries a
        # readable message.
        msg = ("missing `AVALON_ASSET` "
               "in avalon session "
               "or os.environ!")
        log.error(msg)
        raise AssertionError(msg)
    return asset
def get_context_data(
    project_name=None, hierarchy=None, asset=None, task_name=None
):
    """
    Collect all main contextual data.

    Args:
        project_name (string, optional): project name; when given it is
            written back to ``os.environ`` and ``io.Session``
        hierarchy (string, optional): hierarchy path; resolved via
            ``pype.get_hierarchy()`` when not given
        asset (string, optional): asset name; resolved via ``get_asset()``
            when not given
        task_name (string, optional): task name; resolved from session /
            environment when not given

    Returns:
        dict: contextual data

    Raises:
        AssertionError: when no task name can be resolved
    """
    if not task_name:
        lib.set_io_database()
        task_name = io.Session.get("AVALON_TASK", None) \
            or os.getenv("AVALON_TASK", None)
        if not task_name:
            msg = "missing `AVALON_TASK` in avalon session or os.environ!"
            log.error(msg)
            raise AssertionError(msg)

    application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])

    # Only propagate the project name when one was actually passed in;
    # assigning None to an os.environ key raises TypeError, which made the
    # documented `project_name=None` default unusable.
    if project_name:
        os.environ['AVALON_PROJECT'] = project_name
        io.Session['AVALON_PROJECT'] = project_name

    if not hierarchy:
        hierarchy = pype.get_hierarchy()

    project_doc = io.find_one({"type": "project"})

    data = {
        "task": task_name,
        "asset": asset or get_asset(),
        "project": {
            "name": project_doc["name"],
            "code": project_doc["data"].get("code", '')
        },
        "hierarchy": hierarchy,
        "app": application["application_dir"]
    }
    return data
def set_avalon_workdir(
    project=None, hierarchy=None, asset=None, task=None
):
    """
    Updates os.environ and session with filled workdir

    Args:
        project (string, optional): project name
        hierarchy (string, optional): hierarchy path
        asset (string, optional): asset name
        task (string, optional): task name

    Returns:
        os.environ[AVALON_WORKDIR]: workdir path
        avalon.session[AVALON_WORKDIR]: workdir path
    """
    lib.set_io_database()
    # Existing workdir, if any; may still contain unfilled "{...}" template
    # placeholders, which is detected below.
    awd = io.Session.get("AVALON_WORKDIR", None) or \
        os.getenv("AVALON_WORKDIR", None)

    data = get_context_data(project, hierarchy, asset, task)

    # Rebuild the workdir from Anatomy when it is missing or is a raw
    # template (no "{" means it is already a plain path).
    if (not awd) or ("{" not in awd):
        anatomy_filled = Anatomy(io.Session["AVALON_PROJECT"]).format(data)
        awd = anatomy_filled["work"]["folder"]

    # NOTE(review): `format` here resolves to the Python builtin because the
    # `from pypeapp.api import (..., format)` import above is commented out;
    # builtin format(str, dict) raises TypeError — confirm which formatter
    # was intended before relying on this path.
    awd_filled = os.path.normpath(format(awd, data))

    io.Session["AVALON_WORKDIR"] = awd_filled
    os.environ["AVALON_WORKDIR"] = awd_filled
    log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled))
def get_workdir_template(data=None):
    """
    Obtain workdir templated path from Anatomy().

    Args:
        data (dict, optional): basic contextual data; collected via
            ``get_context_data()`` when not provided

    Returns:
        string: template path, or None when the anatomy result carries no
            "work" entry
    """
    anatomy = Anatomy()
    anatomy_filled = anatomy.format(data or get_context_data())

    # Default to None so the function cannot raise UnboundLocalError on the
    # final return: the original only assigned `work` inside the try block
    # and then returned it unconditionally after logging a failure.
    work = None
    try:
        work = anatomy_filled["work"]
    except Exception as e:
        log.error(
            "{0} Error in get_workdir_template(): {1}".format(__name__, str(e))
        )
    return work

View file

@ -82,13 +82,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

View file

@ -81,13 +81,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
project_code = pype.get_project_code()
pype.set_project_code(project_code)
project_code = pype.get_project()["data"].get("code", '')
os.environ["AVALON_PROJECTCODE"] = project_code
io.Session["AVALON_PROJECTCODE"] = project_code
hierarchy = pype.get_hierarchy()
pype.set_hierarchy(hierarchy)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)

View file

@ -0,0 +1,283 @@
import os
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
class AttributesRemapper(BaseAction):
    '''Edit meta data action.'''

    #: Action identifier.
    identifier = 'attributes.remapper'
    #: Action label.
    label = 'Attributes Remapper'
    #: Action description.
    description = 'Remaps attributes in avalon DB'
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator"]
    icon = '{}/ftrack/action_icons/AttributesRemapper.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )
    # Shared avalon DB connection used by launch() across projects.
    db_con = DbConnector()

    # Mapping of legacy attribute keys to their current names. A list value
    # means one legacy key feeds several current keys (old "handles" fills
    # both handleEnd and handleStart).
    keys_to_change = {
        "fstart": "frameStart",
        "startFrame": "frameStart",
        "edit_in": "frameStart",
        "fend": "frameEnd",
        "endFrame": "frameEnd",
        "edit_out": "frameEnd",
        "handle_start": "handleStart",
        "handle_end": "handleEnd",
        "handles": ["handleEnd", "handleStart"],
        "frameRate": "fps",
        "framerate": "fps",
        "resolution_width": "resolutionWidth",
        "resolution_height": "resolutionHeight",
        "pixel_aspect": "pixelAspect"
    }

    def discover(self, session, entities, event):
        ''' Validation '''
        # Always offered; access control is handled through `role_list`.
        return True

    def interface(self, session, entities, event):
        """Build the project-selection form shown before launch().

        Returns None when the user already submitted values (so launch()
        runs next), otherwise a dict describing the ftrack form items.
        """
        # A second round-trip with filled values means the form was
        # submitted; returning None lets launch() take over.
        if event['data'].get('values', {}):
            return

        title = 'Select Projects where attributes should be remapped'

        items = []
        # Enumerator deciding whether checked projects are processed
        # ("selection") or skipped ("except" = inverted selection).
        selection_enum = {
            'label': 'Process type',
            'type': 'enumerator',
            'name': 'process_type',
            'data': [
                {
                    'label': 'Selection',
                    'value': 'selection'
                }, {
                    'label': 'Inverted selection',
                    'value': 'except'
                }
            ],
            'value': 'selection'
        }
        selection_label = {
            'type': 'label',
            'value': (
                'Selection based variants:<br/>'
                '- `Selection` - '
                'NOTHING is processed when nothing is selected<br/>'
                '- `Inverted selection` - '
                'ALL Projects are processed when nothing is selected'
            )
        }
        items.append(selection_enum)
        items.append(selection_label)

        # One labelled boolean per ftrack project, separated by splitters;
        # the boolean's name is the project id so launch() can query it back.
        item_splitter = {'type': 'label', 'value': '---'}
        all_projects = session.query('Project').all()
        for project in all_projects:
            item_label = {
                'type': 'label',
                'value': '{} (<i>{}</i>)'.format(
                    project['full_name'], project['name']
                )
            }
            item = {
                'name': project['id'],
                'type': 'boolean',
                'value': False
            }
            if len(items) > 0:
                items.append(item_splitter)
            items.append(item_label)
            items.append(item)

        if len(items) == 0:
            return {
                'success': False,
                'message': 'Didn\'t found any projects'
            }
        else:
            return {
                'items': items,
                'title': title
            }

    def launch(self, session, entities, event):
        """Remap legacy attribute keys to current names in the avalon DB.

        Iterates the selected projects' Mongo collections and, per document,
        renames `keys_to_change` entries inside the "data" sub-document.
        """
        if 'values' not in event['data']:
            return

        values = event['data']['values']
        process_type = values.pop('process_type')

        # selection=True processes checked projects; False (the "except"
        # variant) processes the unchecked ones.
        selection = True
        if process_type == 'except':
            selection = False

        # Collected problems, shown to the user at the end (key = category,
        # value = list of project names).
        interface_messages = {}

        projects_to_update = []
        for project_id, update_bool in values.items():
            if not update_bool and selection:
                continue

            if update_bool and not selection:
                continue

            project = session.query(
                'Project where id is "{}"'.format(project_id)
            ).one()
            projects_to_update.append(project)

        if not projects_to_update:
            self.log.debug('Nothing to update')
            return {
                'success': True,
                'message': 'Nothing to update'
            }

        self.db_con.install()

        # Only these document types carry the attributes being remapped.
        relevant_types = ["project", "asset", "version"]

        for ft_project in projects_to_update:
            self.log.debug(
                "Processing project \"{}\"".format(ft_project["full_name"])
            )
            # Point the shared connection at this project's collection.
            self.db_con.Session["AVALON_PROJECT"] = ft_project["full_name"]

            project = self.db_con.find_one({'type': 'project'})
            if not project:
                key = "Projects not synchronized to db"
                if key not in interface_messages:
                    interface_messages[key] = []
                interface_messages[key].append(ft_project["full_name"])
                continue

            # Get all entities in project collection from MongoDB
            _entities = self.db_con.find({})
            for _entity in _entities:
                ent_t = _entity.get("type", "*unknown type")
                name = _entity.get("name", "*unknown name")
                self.log.debug(
                    "- {} ({})".format(name, ent_t)
                )

                # Skip types that do not store keys to change
                if ent_t.lower() not in relevant_types:
                    self.log.debug("-- skipping - type is not relevant")
                    continue

                # Get data which will change
                updating_data = {}
                source_data = _entity["data"]

                for key_from, key_to in self.keys_to_change.items():
                    # continue if final key already exists
                    if type(key_to) == list:
                        # NOTE(review): unlike the scalar branch below, this
                        # branch does not skip when the target key already
                        # exists in source_data, so a legacy "handles" value
                        # can overwrite an existing handleStart/handleEnd via
                        # the final source_data.update() — confirm intended.
                        for key in key_to:
                            # continue if final key was set in update_data
                            if key in updating_data:
                                continue
                            # continue if source key not exist or value is None
                            value = source_data.get(key_from)
                            if value is None:
                                continue
                            self.log.debug(
                                "-- changing key {} to {}".format(
                                    key_from,
                                    key
                                )
                            )
                            updating_data[key] = value
                    else:
                        if key_to in source_data:
                            continue
                        # continue if final key was set in update_data
                        if key_to in updating_data:
                            continue
                        # continue if source key not exist or value is None
                        value = source_data.get(key_from)
                        if value is None:
                            continue
                        self.log.debug(
                            "-- changing key {} to {}".format(key_from, key_to)
                        )
                        updating_data[key_to] = value

                # Pop out old keys from entity
                is_obsolete = False
                for key in self.keys_to_change:
                    if key not in source_data:
                        continue
                    is_obsolete = True
                    source_data.pop(key)

                # continue if there is nothing to change
                if not is_obsolete and not updating_data:
                    self.log.debug("-- nothing to change")
                    continue

                source_data.update(updating_data)

                self.db_con.update_many(
                    {"_id": _entity["_id"]},
                    {"$set": {"data": source_data}}
                )

        self.db_con.uninstall()

        if interface_messages:
            self.show_interface_from_dict(
                event, interface_messages, "Errors during remapping attributes"
            )

        return True

    def show_interface_from_dict(self, event, messages, title=""):
        """Render a {category: message(s)} dict as a label-only ftrack form.

        Args:
            event (dict): ftrack event to respond to
            messages (dict): category -> string or list of strings
            title (str, optional): window title
        """
        items = []
        for key, value in messages.items():
            # Skip categories that collected no messages.
            if not value:
                continue
            subtitle = {'type': 'label', 'value': '# {}'.format(key)}
            items.append(subtitle)
            if isinstance(value, list):
                for item in value:
                    message = {
                        'type': 'label', 'value': '<p>{}</p>'.format(item)
                    }
                    items.append(message)
            else:
                message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
                items.append(message)

        self.show_interface(event, items, title)
def register(session, **kw):
    '''Register the action. Called when this module is used as a plugin.'''
    # Only hook the action up for a genuine ftrack API session object;
    # anything else (e.g. a legacy API session) is silently ignored.
    if isinstance(session, ftrack_api.session.Session):
        AttributesRemapper(session).register()

View file

@ -7,6 +7,7 @@ import logging
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, get_ca_mongoid
from pypeapp import config
from ftrack_api.exception import NoResultFoundError
"""
This action creates/updates custom attributes.
@ -118,18 +119,11 @@ class CustomAttributes(BaseAction):
os.environ.get('PYPE_STATICS_SERVER', '')
)
def __init__(self, session):
super().__init__(session)
self.types = {}
self.object_type_ids = {}
self.groups = {}
self.security_roles = {}
self.required_keys = ['key', 'label', 'type']
self.type_posibilities = [
'text', 'boolean', 'date', 'enumerator',
'dynamic enumerator', 'number'
]
required_keys = ['key', 'label', 'type']
type_posibilities = [
'text', 'boolean', 'date', 'enumerator',
'dynamic enumerator', 'number'
]
def discover(self, session, entities, event):
'''
@ -139,8 +133,12 @@ class CustomAttributes(BaseAction):
return True
def launch(self, session, entities, event):
# JOB SETTINGS
self.types = {}
self.object_type_ids = {}
self.groups = {}
self.security_roles = {}
# JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
@ -159,11 +157,14 @@ class CustomAttributes(BaseAction):
job['status'] = 'done'
session.commit()
except Exception as e:
except Exception as exc:
session.rollback()
job['status'] = 'failed'
session.commit()
self.log.error('Creating custom attributes failed ({})'.format(e))
self.log.error(
'Creating custom attributes failed ({})'.format(exc),
exc_info=True
)
return True
@ -226,24 +227,30 @@ class CustomAttributes(BaseAction):
def custom_attributes_from_file(self, session, event):
presets = config.get_presets()['ftrack']['ftrack_custom_attributes']
for cust_attr_name in presets:
for cust_attr_data in presets:
cust_attr_name = cust_attr_data.get(
'label',
cust_attr_data.get('key')
)
try:
data = {}
cust_attr = presets[cust_attr_name]
# Get key, label, type
data.update(self.get_required(cust_attr))
data.update(self.get_required(cust_attr_data))
# Get hierachical/ entity_type/ object_id
data.update(self.get_entity_type(cust_attr))
data.update(self.get_entity_type(cust_attr_data))
# Get group, default, security roles
data.update(self.get_optional(cust_attr))
data.update(self.get_optional(cust_attr_data))
# Process data
self.process_attribute(data)
except CustAttrException as cae:
msg = 'Custom attribute error "{}" - {}'.format(
cust_attr_name, str(cae)
)
self.log.warning(msg)
if cust_attr_name:
msg = 'Custom attribute error "{}" - {}'.format(
cust_attr_name, str(cae)
)
else:
msg = 'Custom attribute error - {}'.format(str(cae))
self.log.warning(msg, exc_info=True)
self.show_message(event, msg)
return True
@ -422,9 +429,10 @@ class CustomAttributes(BaseAction):
def get_security_role(self, security_roles):
roles = []
if len(security_roles) == 0 or security_roles[0] == 'ALL':
security_roles_lowered = [role.lower() for role in security_roles]
if len(security_roles) == 0 or 'all' in security_roles_lowered:
roles = self.get_role_ALL()
elif security_roles[0] == 'except':
elif security_roles_lowered[0] == 'except':
excepts = security_roles[1:]
all = self.get_role_ALL()
for role in all:
@ -443,10 +451,10 @@ class CustomAttributes(BaseAction):
role = self.session.query(query).one()
self.security_roles[role_name] = role
roles.append(role)
except Exception:
raise CustAttrException(
'Securit role "{}" does not exist'.format(role_name)
)
except NoResultFoundError:
raise CustAttrException((
'Securit role "{}" does not exist'
).format(role_name))
return roles

View file

@ -1,14 +1,13 @@
import os
import re
import json
from pype import lib as pypelib
from pype.lib import get_avalon_database
from bson.objectid import ObjectId
import avalon
import avalon.api
from avalon import schema
from avalon.vendor import toml, jsonschema
from pypeapp import Logger
from pypeapp import Logger, Anatomy, config
ValidationError = jsonschema.ValidationError
@ -53,8 +52,8 @@ def import_to_avalon(
if entity_type in ['Project']:
type = 'project'
config = get_project_config(entity)
schema.validate(config)
proj_config = get_project_config(entity)
schema.validate(proj_config)
av_project_code = None
if av_project is not None and 'code' in av_project['data']:
@ -62,13 +61,12 @@ def import_to_avalon(
ft_project_code = ft_project['name']
if av_project is None:
project_schema = pypelib.get_avalon_project_template_schema()
item = {
'schema': project_schema,
'schema': "avalon-core:project-2.0",
'type': type,
'name': project_name,
'data': dict(),
'config': config,
'config': proj_config,
'parent': None,
}
schema.validate(item)
@ -214,9 +212,8 @@ def import_to_avalon(
{'type': 'asset', 'name': name}
)
if avalon_asset is None:
asset_schema = pypelib.get_avalon_asset_template_schema()
item = {
'schema': asset_schema,
'schema': "avalon-core:asset-2.0",
'name': name,
'silo': silo,
'parent': ObjectId(projectId),
@ -345,13 +342,12 @@ def changeability_check_childs(entity):
childs = entity['children']
for child in childs:
if child.entity_type.lower() == 'task':
config = get_config_data()
if 'sync_to_avalon' in config:
config = config['sync_to_avalon']
if 'statuses_name_change' in config:
available_statuses = config['statuses_name_change']
else:
available_statuses = []
available_statuses = config.get_presets().get(
"ftrack", {}).get(
"ftrack_config", {}).get(
"sync_to_avalon", {}).get(
"statuses_name_change", []
)
ent_status = child['status']['name'].lower()
if ent_status not in available_statuses:
return False
@ -480,14 +476,28 @@ def get_avalon_project(ft_project):
return avalon_project
def get_project_config(entity):
config = {}
config['schema'] = pypelib.get_avalon_project_config_schema()
config['tasks'] = get_tasks(entity)
config['apps'] = get_project_apps(entity)
config['template'] = pypelib.get_avalon_project_template()
def get_avalon_project_template():
"""Get avalon template
return config
Returns:
dictionary with templates
"""
templates = Anatomy().templates
return {
'workfile': templates["avalon"]["workfile"],
'work': templates["avalon"]["work"],
'publish': templates["avalon"]["publish"]
}
def get_project_config(entity):
proj_config = {}
proj_config['schema'] = 'avalon-core:config-1.0'
proj_config['tasks'] = get_tasks(entity)
proj_config['apps'] = get_project_apps(entity)
proj_config['template'] = get_avalon_project_template()
return proj_config
def get_tasks(project):
@ -539,7 +549,7 @@ def avalon_check_name(entity, inSchema=None):
if entity.entity_type in ['Project']:
# data['type'] = 'project'
name = entity['full_name']
# schema = get_avalon_project_template_schema()
# schema = "avalon-core:project-2.0"
data['silo'] = 'Film'
@ -557,24 +567,6 @@ def avalon_check_name(entity, inSchema=None):
raise ValueError(msg.format(name))
def get_config_data():
path_items = [pypelib.get_presets_path(), 'ftrack', 'ftrack_config.json']
filepath = os.path.sep.join(path_items)
data = dict()
try:
with open(filepath) as data_file:
data = json.load(data_file)
except Exception as e:
msg = (
'Loading "Ftrack Config file" Failed.'
' Please check log for more information.'
)
log.warning("{} - {}".format(msg, str(e)))
return data
def show_errors(obj, event, errors):
title = 'Hey You! You raised few Errors! (*look below*)'
items = []

View file

@ -5,7 +5,7 @@ from avalon import lib as avalonlib
import acre
from pype import api as pype
from pype import lib as pypelib
from .avalon_sync import get_config_data
from pypeapp import config
from .ftrack_base_handler import BaseHandler
from pypeapp import Anatomy
@ -328,10 +328,10 @@ class AppAction(BaseHandler):
pass
# Change status of task to In progress
config = get_config_data()
presets = config.get_presets()["ftrack"]["ftrack_config"]
if 'status_update' in config:
statuses = config['status_update']
if 'status_update' in presets:
statuses = presets['status_update']
actual_status = entity['status']['name'].lower()
next_status_name = None
@ -351,7 +351,7 @@ class AppAction(BaseHandler):
session.commit()
except Exception:
msg = (
'Status "{}" in config wasn\'t found on Ftrack'
'Status "{}" in presets wasn\'t found on Ftrack'
).format(next_status_name)
self.log.warning(msg)

View file

@ -138,8 +138,8 @@ def update_frame_range(comp, representations):
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
start = min(v["data"]["startFrame"] for v in versions)
end = max(v["data"]["endFrame"] for v in versions)
start = min(v["data"]["frameStart"] for v in versions)
end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)

View file

@ -10,10 +10,7 @@ from avalon.houdini import pipeline as houdini
from pype.houdini import lib
from pype.lib import (
any_outdated,
update_task_from_path
)
from pype.lib import any_outdated
PARENT_DIR = os.path.dirname(__file__)
@ -57,8 +54,6 @@ def on_save(*args):
avalon.logger.info("Running callback on save..")
update_task_from_path(hou.hipFile.path())
nodes = lib.get_id_required_nodes()
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
@ -68,8 +63,6 @@ def on_open(*args):
avalon.logger.info("Running callback on open..")
update_task_from_path(hou.hipFile.path())
if any_outdated():
from ..widgets import popup

View file

@ -205,7 +205,7 @@ def validate_fps():
"""
fps = lib.get_asset_fps()
fps = lib.get_asset()["data"]["fps"]
current_fps = hou.fps() # returns float
if current_fps != fps:

View file

@ -34,11 +34,32 @@ def _subprocess(args):
raise ValueError("\"{}\" was not successful: {}".format(args, output))
def get_handle_irregular(asset):
    """Return the asset's (handle_start, handle_end) pair.

    Handles missing in the asset's "data" sub-document default to 0.

    Args:
        asset (dict): asset document containing a "data" mapping

    Returns:
        tuple: (handle_start, handle_end)
    """
    asset_data = asset["data"]
    return tuple(
        asset_data.get(key, 0) for key in ("handle_start", "handle_end")
    )
def get_hierarchy(asset_name=None):
    """
    Obtain asset hierarchy path string from mongo db

    Args:
        asset_name (str, optional): asset to look up; defaults to
            AVALON_ASSET from ``io.Session`` (falling back to os.environ)

    Returns:
        string: asset hierarchy path
    """
    if not asset_name:
        asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])

    asset = io.find_one({
        "type": 'asset',
        "name": asset_name
    })

    hierarchy_items = []
    entity = asset
    # Walk up the visualParent chain until an entity without a parent; the
    # asset itself is not included, only its ancestors.
    while True:
        parent_id = entity.get("data", {}).get("visualParent")
        if not parent_id:
            break
        entity = io.find_one({"_id": parent_id})
        hierarchy_items.append(entity["name"])

    # NOTE(review): names are appended nearest-parent first, so the joined
    # path reads child-side toward root — confirm this matches the ordering
    # expected for AVALON_HIERARCHY (often root-first).
    return "/".join(hierarchy_items)
def add_tool_to_environment(tools):
@ -157,45 +178,6 @@ def any_outdated():
return False
def update_task_from_path(path):
"""Update the context using the current scene state.
When no changes to the context it will not trigger an update.
When the context for a file could not be parsed an error is logged but not
raised.
"""
if not path:
log.warning("Can't update the current task. Scene is not saved.")
return
# Find the current context from the filename
project = io.find_one({"type": "project"},
projection={"config.template.work": True})
template = project['config']['template']['work']
# Force to use the registered to root to avoid using wrong paths
template = pather.format(template, {"root": avalon.api.registered_root()})
try:
context = pather.parse(template, path)
except ParseError:
log.error("Can't update the current task. Unable to parse the "
"task for: %s (pattern: %s)", path, template)
return
# Find the changes between current Session and the path's context.
current = {
"asset": avalon.api.Session["AVALON_ASSET"],
"task": avalon.api.Session["AVALON_TASK"]
# "app": avalon.api.Session["AVALON_APP"]
}
changes = {key: context[key] for key, current_value in current.items()
if context[key] != current_value}
if changes:
log.info("Updating work task to: %s", context)
avalon.api.update_current_task(**changes)
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times"""
return b.join(s.rsplit(a, n))
@ -331,140 +313,107 @@ def _get_host_name():
return _host.__name__.rsplit(".", 1)[-1]
def collect_container_metadata(container):
"""Add additional data based on the current host
def get_asset(asset_name=None):
entity_data_keys_from_project_when_miss = [
"frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
"resolutionWidth", "resolutionHeight"
]
If the host application's lib module does not have a function to inject
additional data it will return the input container
entity_keys_from_project_when_miss = []
alternatives = {
"handleStart": "handles",
"handleEnd": "handles"
}
defaults = {
"handleStart": 0,
"handleEnd": 0
}
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
project_document = io.find_one({"type": "project"})
for key in entity_data_keys_from_project_when_miss:
if asset_document["data"].get(key):
continue
value = project_document["data"].get(key)
if value is not None or key not in alternatives:
asset_document["data"][key] = value
continue
alt_key = alternatives[key]
value = asset_document["data"].get(alt_key)
if value is not None:
asset_document["data"][key] = value
continue
value = project_document["data"].get(alt_key)
if value:
asset_document["data"][key] = value
continue
if key in defaults:
asset_document["data"][key] = defaults[key]
for key in entity_keys_from_project_when_miss:
if asset_document.get(key):
continue
value = project_document.get(key)
if value is not None or key not in alternatives:
asset_document[key] = value
continue
alt_key = alternatives[key]
value = asset_document.get(alt_key)
if value:
asset_document[key] = value
continue
value = project_document.get(alt_key)
if value:
asset_document[key] = value
continue
if key in defaults:
asset_document[key] = defaults[key]
return asset_document
def get_project():
io.install()
return io.find_one({"type": "project"})
def get_version_from_path(file):
"""
Finds version number in file path string
Args:
container (dict): collection if representation data in host
file (string): file path
Returns:
generator
"""
# TODO: Improve method of getting the host lib module
host_name = _get_host_name()
package_name = "pype.{}.lib".format(host_name)
hostlib = importlib.import_module(package_name)
if not hasattr(hostlib, "get_additional_data"):
return {}
return hostlib.get_additional_data(container)
def get_asset_fps():
"""Returns project's FPS, if not found will return 25 by default
Returns:
int, float
v: version number in string ('001')
"""
key = "fps"
# FPS from asset data (if set)
asset_data = get_asset_data()
if key in asset_data:
return asset_data[key]
# FPS from project data (if set)
project_data = get_project_data()
if key in project_data:
return project_data[key]
# Fallback to 25 FPS
return 25.0
def get_project_data():
"""Get the data of the current project
The data of the project can contain things like:
resolution
fps
renderer
Returns:
dict:
"""
project_name = io.active_project()
project = io.find_one({"name": project_name,
"type": "project"},
projection={"data": True})
data = project.get("data", {})
return data
def get_asset_data(asset=None):
"""Get the data from the current asset
Args:
asset(str, Optional): name of the asset, eg:
Returns:
dict
"""
asset_name = asset or avalon.api.Session["AVALON_ASSET"]
document = io.find_one({"name": asset_name,
"type": "asset"})
data = document.get("data", {})
return data
def get_data_hierarchical_attr(entity, attr_name):
vp_attr = 'visualParent'
data = entity['data']
value = data.get(attr_name, None)
if value is not None:
return value
elif vp_attr in data:
if data[vp_attr] is None:
parent_id = entity['parent']
else:
parent_id = data[vp_attr]
parent = io.find_one({"_id": parent_id})
return get_data_hierarchical_attr(parent, attr_name)
else:
return None
def get_avalon_project_config_schema():
schema = 'avalon-core:config-1.0'
return schema
def get_avalon_project_template_schema():
schema = "avalon-core:project-2.0"
return schema
def get_avalon_project_template():
from pypeapp import Anatomy
"""
Get avalon template
Returns:
dictionary with templates
"""
templates = Anatomy().templates
proj_template = {}
proj_template['workfile'] = templates["avalon"]["workfile"]
proj_template['work'] = templates["avalon"]["work"]
proj_template['publish'] = templates["avalon"]["publish"]
return proj_template
def get_avalon_asset_template_schema():
schema = "avalon-core:asset-2.0"
return schema
pattern = re.compile(r"[\._]v([0-9]*)")
try:
return pattern.findall(file)[0]
except IndexError:
log.error(
"templates:get_version_from_workfile:"
"`{}` missing version string."
"Example `v004`".format(file)
)
def get_avalon_database():
@ -474,31 +423,20 @@ def get_avalon_database():
def set_io_database():
project = os.environ.get('AVALON_PROJECT', '')
asset = os.environ.get('AVALON_ASSET', '')
silo = os.environ.get('AVALON_SILO', '')
os.environ['AVALON_PROJECT'] = project
os.environ['AVALON_ASSET'] = asset
os.environ['AVALON_SILO'] = silo
required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
for key in required_keys:
os.environ[key] = os.environ.get(key, "")
io.install()
def get_all_avalon_projects():
db = get_avalon_database()
project_names = db.collection_names()
projects = []
for name in project_names:
for name in db.collection_names():
projects.append(db[name].find_one({'type': 'project'}))
return projects
def get_presets_path():
templates = os.environ['PYPE_CONFIG']
path_items = [templates, 'presets']
filepath = os.path.sep.join(path_items)
return filepath
def filter_pyblish_plugins(plugins):
"""
This servers as plugin filter / modifier for pyblish. It will load plugin

View file

@ -280,8 +280,8 @@ def collect_animation_data():
# build attributes
data = OrderedDict()
data["startFrame"] = start
data["endFrame"] = end
data["frameStart"] = start
data["frameEnd"] = end
data["handles"] = 0
data["step"] = 1.0
data["fps"] = fps
@ -1858,16 +1858,16 @@ def set_context_settings():
# Todo (Wijnand): apply renderer and resolution of project
project_data = lib.get_project_data()
asset_data = lib.get_asset_data()
project_data = lib.get_project()["data"]
asset_data = lib.get_asset()["data"]
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
set_scene_fps(fps)
# Set project resolution
width_key = "resolution_width"
height_key = "resolution_height"
width_key = "resolutionWidth"
height_key = "resolutionHeight"
width = asset_data.get(width_key, project_data.get(width_key, 1920))
height = asset_data.get(height_key, project_data.get(height_key, 1080))
@ -1887,7 +1887,7 @@ def validate_fps():
"""
fps = lib.get_asset_fps()
fps = lib.get_asset()["data"]["fps"]
current_fps = mel.eval('currentTimeUnitToFPS()') # returns float
if current_fps != fps:

View file

@ -45,7 +45,7 @@ def checkInventoryVersions():
if container:
node = container["_node"]
avalon_knob_data = get_avalon_knob_data(node)
avalon_knob_data = avalon.nuke.get_avalon_knob_data(node)
# get representation from io
representation = io.find_one({
@ -88,7 +88,7 @@ def writes_version_sync():
for each in nuke.allNodes():
if each.Class() == 'Write':
avalon_knob_data = get_avalon_knob_data(each)
avalon_knob_data = avalon.nuke.get_avalon_knob_data(each)
try:
if avalon_knob_data['families'] not in ["render"]:
@ -119,7 +119,7 @@ def version_up_script():
def get_render_path(node):
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
@ -153,15 +153,15 @@ def format_anatomy(data):
if not version:
file = script_name()
data["version"] = pype.get_version_from_path(file)
project_document = pype.get_project()
data.update({
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": str(pype.get_task()).lower(),
"task": api.Session["AVALON_TASK"].lower(),
"family": data["avalon"]["family"],
"project": {"name": pype.get_project_name(),
"code": pype.get_project_code()},
"project": {"name": project_document["name"],
"code": project_document["data"].get("code", '')},
"representation": data["nuke_dataflow_writes"]["file_type"],
"app": data["application"]["application_dir"],
"hierarchy": pype.get_hierarchy(),
@ -449,17 +449,17 @@ def reset_frame_range_handles():
root = nuke.root()
name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": name, "type": "asset"})
asset_entity = pype.get_asset(name)
if "data" not in asset:
if "data" not in asset_entity:
msg = "Asset {} don't have set any 'data'".format(name)
log.warning(msg)
nuke.message(msg)
return
data = asset["data"]
data = asset_entity["data"]
missing_cols = []
check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"]
check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"]
for col in check_cols:
if col not in data:
@ -473,30 +473,27 @@ def reset_frame_range_handles():
return
# get handles values
handles = avalon.nuke.get_handles(asset)
handle_start, handle_end = pype.get_handle_irregular(asset)
handle_start = asset_entity["data"]["handleStart"]
handle_end = asset_entity["data"]["handleEnd"]
fps = asset["data"]["fps"]
edit_in = int(asset["data"]["fstart"]) - handle_start
edit_out = int(asset["data"]["fend"]) + handle_end
fps = asset_entity["data"]["fps"]
frame_start = int(asset_entity["data"]["frameStart"]) - handle_start
frame_end = int(asset_entity["data"]["frameEnd"]) + handle_end
root["fps"].setValue(fps)
root["first_frame"].setValue(edit_in)
root["last_frame"].setValue(edit_out)
root["first_frame"].setValue(frame_start)
root["last_frame"].setValue(frame_end)
log.info("__ handles: `{}`".format(handles))
log.info("__ handle_start: `{}`".format(handle_start))
log.info("__ handle_end: `{}`".format(handle_end))
log.info("__ edit_in: `{}`".format(edit_in))
log.info("__ edit_out: `{}`".format(edit_out))
log.info("__ fps: `{}`".format(fps))
# setting active viewers
nuke.frame(int(asset["data"]["fstart"]))
nuke.frame(int(asset_entity["data"]["frameStart"]))
range = '{0}-{1}'.format(
int(asset["data"]["fstart"]),
int(asset["data"]["fend"]))
int(asset_entity["data"]["frameStart"]),
int(asset_entity["data"]["frameEnd"]))
for node in nuke.allNodes(filter="Viewer"):
node['frame_range'].setValue(range)
@ -510,21 +507,12 @@ def reset_frame_range_handles():
# adding handle_start/end to root avalon knob
if not avalon.nuke.set_avalon_knob_data(root, {
"handle_start": int(handle_start),
"handle_end": int(handle_end)
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}):
log.warning("Cannot set Avalon knob to Root node!")
def get_avalon_knob_data(node):
    """Read and decode the TOML payload stored on a node's ``avalon`` knob.

    Args:
        node: Nuke node expected to carry an ``avalon`` knob whose value
            is a TOML-encoded string.

    Returns:
        dict: Decoded knob data, or ``None`` when the knob is missing or
        its value is not valid TOML.
    """
    import toml
    try:
        # Broad catch on purpose: both a missing knob and malformed TOML
        # are treated as "no data" rather than raising to the caller.
        data = toml.loads(node['avalon'].value())
    except Exception:
        return None
    return data
def reset_resolution():
"""Set resolution to project resolution."""
log.info("Reseting resolution")
@ -532,9 +520,9 @@ def reset_resolution():
asset = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": asset, "type": "asset"})
width = asset.get('data', {}).get('resolution_width')
height = asset.get('data', {}).get('resolution_height')
pixel_aspect = asset.get('data', {}).get('pixel_aspect')
width = asset.get('data', {}).get("resolutionWidth")
height = asset.get('data', {}).get("resolutionHeight")
pixel_aspect = asset.get('data', {}).get("pixelAspect")
log.info("pixel_aspect: {}".format(pixel_aspect))
if any(not x for x in [width, height, pixel_aspect]):
@ -575,7 +563,7 @@ def reset_resolution():
crnt_fmt_kargs = {
"width": (check_format.width()),
"height": (check_format.height()),
"pixel_aspect": float(check_format.pixelAspect())
"pixelAspect": float(check_format.pixelAspect())
}
if bbox:
crnt_fmt_kargs.update({
@ -590,7 +578,7 @@ def reset_resolution():
new_fmt_kargs = {
"width": int(width),
"height": int(height),
"pixel_aspect": float(pixel_aspect),
"pixelAspect": float(pixel_aspect),
"project_name": format_name
}
if bbox:
@ -620,13 +608,13 @@ def make_format_string(**args):
"{y} "
"{r} "
"{t} "
"{pixel_aspect:.2f}".format(**args)
"{pixelAspect:.2f}".format(**args)
)
else:
return (
"{width} "
"{height} "
"{pixel_aspect:.2f}".format(**args)
"{pixelAspect:.2f}".format(**args)
)
@ -668,60 +656,6 @@ def get_hierarchical_attr(entity, attr, default=None):
return get_hierarchical_attr(parent, attr)
# TODO: bellow functions are wip and needs to be check where they are used
# ------------------------------------
#
# def update_frame_range(start, end, root=None):
# """Set Nuke script start and end frame range
#
# Args:
# start (float, int): start frame
# end (float, int): end frame
# root (object, Optional): root object from nuke's script
#
# Returns:
# None
#
# """
#
# knobs = {
# "first_frame": start,
# "last_frame": end
# }
#
# with avalon.nuke.viewer_update_and_undo_stop():
# for key, value in knobs.items():
# if root:
# root[key].setValue(value)
# else:
# nuke.root()[key].setValue(value)
#
# #
# def get_additional_data(container):
# """Get Nuke's related data for the container
#
# Args:
# container(dict): the container found by the ls() function
#
# Returns:
# dict
# """
#
# node = container["_node"]
# tile_color = node['tile_color'].value()
# if tile_color is None:
# return {}
#
# hex = '%08x' % tile_color
# rgba = [
# float(int(hex[0:2], 16)) / 255.0,
# float(int(hex[2:4], 16)) / 255.0,
# float(int(hex[4:6], 16)) / 255.0
# ]
#
# return {"color": Qt.QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
def get_write_node_template_attr(node):
''' Gets all defined data from presets
@ -729,7 +663,7 @@ def get_write_node_template_attr(node):
'''
# get avalon data from node
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
"preset": data['avalon']['families']
@ -747,7 +681,7 @@ def get_write_node_template_attr(node):
# adding dataflow template
{correct_data.update({k: v})
for k, v in nuke_dataflow_writes.items()
if k not in ["id", "previous"]}
if k not in ["_id", "_previous"]}
# adding colorspace template
{correct_data.update({k: v})

View file

@ -46,14 +46,14 @@ def set_workfiles():
project.setProjectRoot(active_project_root)
# get project data from avalon db
project_data = pype.get_project_data()
project_data = pype.get_project()["data"]
log.info("project_data: {}".format(project_data))
# get format and fps property from avalon db on project
width = project_data['resolution_width']
height = project_data['resolution_height']
pixel_aspect = project_data['pixel_aspect']
width = project_data["resolutionWidth"]
height = project_data["resolutionHeight"]
pixel_aspect = project_data["pixelAspect"]
fps = project_data['fps']
format_name = project_data['code']

View file

@ -14,9 +14,9 @@ def create_nk_script_clips(script_lst, seq=None):
'handles': 10,
'handleStart': 15, # added asymetrically to handles
'handleEnd': 10, # added asymetrically to handles
'timelineIn': 16,
'startFrame': 991,
'endFrame': 1023,
"clipIn": 16,
"frameStart": 991,
"frameEnd": 1023,
'task': 'Comp-tracking',
'work_dir': 'VFX_PR',
'shot': '00010'
@ -55,12 +55,12 @@ def create_nk_script_clips(script_lst, seq=None):
if media_in:
source_in = media_in + handle_start
else:
source_in = nk['startFrame'] + handle_start
source_in = nk["frameStart"] + handle_start
if media_duration:
source_out = (media_in + media_duration - 1) - handle_end
else:
source_out = nk['endFrame'] - handle_end
source_out = nk["frameEnd"] - handle_end
print("__ media: `{}`".format(media))
print("__ media_in: `{}`".format(media_in))
@ -98,8 +98,8 @@ def create_nk_script_clips(script_lst, seq=None):
trackItem.setSourceIn(source_in)
trackItem.setSourceOut(source_out)
trackItem.setSourceIn(source_in)
trackItem.setTimelineIn(nk['timelineIn'])
trackItem.setTimelineOut(nk['timelineIn'] + (source_out - source_in))
trackItem.setTimelineIn(nk["clipIn"])
trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
track.addTrackItem(trackItem)
track.addTrackItem(trackItem)
clips_lst.append(trackItem)
@ -179,9 +179,9 @@ script_lst = [{
'handles': 10,
'handleStart': 10,
'handleEnd': 10,
'timelineIn': 16,
'startFrame': 991,
'endFrame': 1023,
"clipIn": 16,
"frameStart": 991,
"frameEnd": 1023,
'task': 'platesMain',
'work_dir': 'shots',
'shot': '120sh020'

View file

@ -87,13 +87,13 @@ class CollectContextDataFromAport(pyblish.api.ContextPlugin):
context.data["currentFile"] = current_file
# get project data from avalon
project_data = pype.get_project_data()
project_data = pype.get_project()["data"]
assert project_data, "No `project_data` data in avalon db"
context.data["projectData"] = project_data
self.log.debug("project_data: {}".format(project_data))
# get asset data from avalon and fix all paths
asset_data = pype.get_asset_data()
asset_data = pype.get_asset()["data"]
assert asset_data, "No `asset_data` data in avalon db"
asset_data = {k: v.replace("\\", "/") for k, v in asset_data.items()
if isinstance(v, str)}

View file

@ -39,19 +39,18 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
assert instances_data, "No `asset_default` data in json file"
asset_name = a_session["AVALON_ASSET"]
entity = io.find_one({"name": asset_name,
"type": "asset"})
entity = pype.get_asset(asset_name)
# get frame start > first try from asset data
frame_start = context.data["assetData"].get("fstart", None)
frame_start = context.data["assetData"].get("frameStart", None)
if not frame_start:
self.log.debug("frame_start not on assetData")
# get frame start > second try from parent data
frame_start = pype.get_data_hierarchical_attr(entity, "fstart")
frame_start = entity["data"]["frameStart"]
if not frame_start:
self.log.debug("frame_start not on any parent entity")
# get frame start > third try from parent data
frame_start = asset_default["fstart"]
frame_start = asset_default["frameStart"]
assert frame_start, "No `frame_start` data found, "
"please set `fstart` on asset"
@ -61,7 +60,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
handles = context.data["assetData"].get("handles", None)
if not handles:
# get frame start > second try from parent data
handles = pype.get_data_hierarchical_attr(entity, "handles")
handles = entity["data"]["handles"]
if not handles:
# get frame start > third try from parent data
handles = asset_default["handles"]
@ -129,7 +128,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
instance.data.update({
"subset": subset_name,
"task": task,
"fstart": frame_start,
"frameStart": frame_start,
"handles": handles,
"host": host,
"asset": asset,

View file

@ -76,11 +76,11 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
else:
end_frame += (
instance.data['endFrame'] - instance.data['startFrame']
instance.data["frameEnd"] - instance.data["frameStart"]
)
if not comp.get('frameRate'):
comp['frameRate'] = instance.context.data['fps']
if not comp.get('fps'):
comp['fps'] = instance.context.data['fps']
location = self.get_ftrack_location(
'ftrack.server', ft_session
)
@ -90,7 +90,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"metadata": {'ftr_meta': json.dumps({
'frameIn': int(start_frame),
'frameOut': int(end_frame),
'frameRate': float(comp['frameRate'])})}
'frameRate': float(comp['fps'])})}
}
comp['thumbnail'] = False
else:

View file

@ -27,8 +27,8 @@ class FusionSetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -60,8 +60,8 @@ class FusionSetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -145,7 +145,7 @@ class FusionLoadSequence(api.Loader):
tool["Clip"] = path
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
start = context["version"]["data"].get("frameStart", None)
if start is not None:
loader_shift(tool, start, relative=False)
@ -175,7 +175,7 @@ class FusionLoadSequence(api.Loader):
been set.
- GlobalIn: Fusion reset to comp's global in if duration changes
- We change it to the "startFrame"
- We change it to the "frameStart"
- GlobalEnd: Fusion resets to globalIn + length if duration changes
- We do the same like Fusion - allow fusion to take control.
@ -212,7 +212,7 @@ class FusionLoadSequence(api.Loader):
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
start = version["data"].get("startFrame")
start = version["data"].get("frameStart")
if start is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "

View file

@ -23,7 +23,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
current context's data as "startFrame" and "endFrame".
current context's data as "frameStart" and "frameEnd".
"""
@ -43,8 +43,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
savers = [tool for tool in tools if tool.ID == "Saver"]
start, end = get_comp_render_range(comp)
context.data["startFrame"] = start
context.data["endFrame"] = end
context.data["frameStart"] = start
context.data["frameEnd"] = end
for tool in savers:
path = tool["Clip"][comp.TIME_UNDEFINED]

View file

@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}

View file

@ -79,8 +79,8 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Fusion",
"Frames": "{start}-{end}".format(
start=int(context.data["startFrame"]),
end=int(context.data["endFrame"])
start=int(context.data["frameStart"]),
end=int(context.data["frameEnd"])
),
"Comment": comment,

View file

@ -1,22 +1,15 @@
import os
import subprocess
import json
from pype import lib as pypelib
from pypeapp import config
from avalon import api
def get_config_data():
    """Load the djv_view preset configuration from disk.

    Reads ``<presets>/djv_view/config.json``, with the presets root
    resolved via ``pypelib.get_presets_path()``.

    Returns:
        dict: Parsed JSON configuration.

    Raises:
        OSError: If the config file cannot be opened.
        ValueError: If the file content is not valid JSON.
    """
    path_items = [pypelib.get_presets_path(), 'djv_view', 'config.json']
    filepath = os.path.sep.join(path_items)
    data = dict()
    with open(filepath) as data_file:
        data = json.load(data_file)
    return data
def get_families():
families = []
paths = get_config_data().get('djv_paths', [])
paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
"djv_paths", []
)
for path in paths:
if os.path.exists(path):
families.append("*")
@ -25,13 +18,15 @@ def get_families():
def get_representation():
return get_config_data().get('file_ext', [])
return config.get_presets().get("djv_view", {}).get("config", {}).get(
'file_ext', []
)
class OpenInDJV(api.Loader):
"""Open Image Sequence with system default"""
config_data = get_config_data()
config_data = config.get_presets().get("djv_view", {}).get("config", {})
families = get_families()
representations = get_representation()
@ -42,7 +37,9 @@ class OpenInDJV(api.Loader):
def load(self, context, name, namespace, data):
self.djv_path = None
paths = get_config_data().get('djv_paths', [])
paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
"djv_paths", []
)
for path in paths:
if os.path.exists(path):
self.djv_path = path

View file

@ -67,9 +67,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
if isinstance(component['files'], list):
collections, remainder = clique.assemble(component['files'])
self.log.debug("collecting sequence: {}".format(collections))
instance.data['startFrame'] = int(component['startFrame'])
instance.data['endFrame'] = int(component['endFrame'])
instance.data['frameRate'] = int(component['frameRate'])
instance.data["frameStart"] = int(component["frameStart"])
instance.data["frameEnd"] = int(component["frameEnd"])
instance.data['fps'] = int(component['fps'])
instance.data["representations"].append(component)

View file

@ -6,14 +6,13 @@ from pprint import pformat
import pyblish.api
from avalon import api
import pype.api as pype
def collect(root,
regex=None,
exclude_regex=None,
startFrame=None,
endFrame=None):
frame_start=None,
frame_end=None):
"""Collect sequence collections in root"""
from avalon.vendor import clique
@ -52,10 +51,10 @@ def collect(root,
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
if startFrame is not None and index < startFrame:
if frame_start is not None and index < frame_start:
collection.indexes.discard(index)
continue
if endFrame is not None and index > endFrame:
if frame_end is not None and index > frame_end:
collection.indexes.discard(index)
continue
@ -77,8 +76,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
api.Session["AVALON_ASSET"]
subset (str): The subset to publish to. If not provided the sequence's
head (up to frame number) will be used.
startFrame (int): The start frame for the sequence
endFrame (int): The end frame for the sequence
frame_start (int): The start frame for the sequence
frame_end (int): The end frame for the sequence
root (str): The path to collect from (can be relative to the .json)
regex (str): A regex for the sequence filename
exclude_regex (str): A regex for filename to exclude from collection
@ -143,8 +142,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
collections = collect(root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
startFrame=data.get("startFrame"),
endFrame=data.get("endFrame"))
frame_start=data.get("frameStart"),
frame_end=data.get("frameEnd"))
self.log.info("Found collections: {}".format(collections))
@ -179,8 +178,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("startFrame", indices[0])
end = data.get("endFrame", indices[-1])
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
# root = os.path.normpath(root)
# self.log.info("Source: {}}".format(data.get("source", "")))
@ -194,8 +193,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"startFrame": start,
"endFrame": end,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get('source', '')
})
@ -211,7 +210,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
'files': list(collection),
"stagingDir": root,
"anatomy_template": "render",
"frameRate": fps,
"fps": fps,
"tags": ['review']
}
instance.data["representations"].append(representation)

View file

@ -1,7 +1,7 @@
import os
import json
import pyblish.api
from pype import lib as pypelib
from pypeapp import config
class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
@ -12,13 +12,5 @@ class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
hosts = ["shell"]
def process(self, context):
config_items = [
pypelib.get_presets_path(),
"ftrack",
"output_representation.json"
]
config_file = os.path.sep.join(config_items)
with open(config_file) as data_file:
config_data = json.load(data_file)
config_data = config.get_presets()["ftrack"]["output_representation"]
context.data['output_repre_config'] = config_data

View file

@ -12,6 +12,6 @@ class CollectProjectData(pyblish.api.ContextPlugin):
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project_data()
context.data["projectData"] = pype.get_project()["data"]
return

View file

@ -33,7 +33,7 @@ class ExtractBurnin(pype.api.Extractor):
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
"start_frame": int(instance.data['startFrame']),
"start_frame": int(instance.data["frameStart"]),
"version": version
}
self.log.debug("__ prep_data: {}".format(prep_data))

View file

@ -22,7 +22,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
families = ["imagesequence", "render", "write", "source"]
def process(self, instance):
start = instance.data.get("startFrame")
start = instance.data.get("frameStart")
stagingdir = os.path.normpath(instance.data.get("stagingDir"))
collected_frames = os.listdir(stagingdir)

View file

@ -30,7 +30,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("startFrame")
start_frame = inst_data.get("frameStart")
self.log.debug("Families In: `{}`".format(instance.data["families"]))

View file

@ -404,7 +404,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles", "sourceHashes"
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:

View file

@ -36,9 +36,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template from anatomy that should be used for
integrating this file. Only the first level can
be specified right now.
'startFrame'
'endFrame'
'framerate'
"frameStart"
"frameEnd"
'fps'
"""
label = "Integrate Asset New"
@ -303,10 +303,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_tail = dst_collection.format("{tail}")
index_frame_start = None
if repre.get('startFrame'):
if repre.get("frameStart"):
frame_start_padding = len(str(
repre.get('endFrame')))
index_frame_start = repre.get('startFrame')
repre.get("frameEnd")))
index_frame_start = repre.get("frameStart")
dst_padding_exp = src_padding_exp
for i in src_collection.indexes:
@ -544,8 +544,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles",
"handle_end", "handle_start", "sourceHashes"
"frameStart", "frameEnd", "step", "handles",
"handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
if key in instance.data:

View file

@ -408,7 +408,7 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["startFrame", "endFrame", "step",
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:

View file

@ -121,7 +121,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
- publishJobState (str, Optional): "Active" or "Suspended"
This defaults to "Suspended"
This requires a "startFrame" and "endFrame" to be present in instance.data
This requires a "frameStart" and "frameEnd" to be present in instance.data
or in context.data.
"""
@ -259,12 +259,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Get start/end frame from instance, if not available get from context
context = instance.context
start = instance.data.get("startFrame")
start = instance.data.get("frameStart")
if start is None:
start = context.data["startFrame"]
end = instance.data.get("endFrame")
start = context.data["frameStart"]
end = instance.data.get("frameEnd")
if end is None:
end = context.data["endFrame"]
end = context.data["frameEnd"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
@ -289,8 +289,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata = {
"asset": asset,
"regex": regex,
"startFrame": start,
"endFrame": end,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
"families": ["render"],
"source": source,
@ -338,8 +338,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["startFrame"]
prev_end = version["data"]["endFrame"]
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
@ -375,12 +375,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Please do so when fixing this.
# Start frame
metadata["startFrame"] = updated_start
metadata["metadata"]["instance"]["startFrame"] = updated_start
metadata["frameStart"] = updated_start
metadata["metadata"]["instance"]["frameStart"] = updated_start
# End frame
metadata["endFrame"] = updated_end
metadata["metadata"]["instance"]["endFrame"] = updated_end
metadata["frameEnd"] = updated_end
metadata["metadata"]["instance"]["frameEnd"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)

View file

@ -22,8 +22,8 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
frames = list(collection.indexes)
current_range = (frames[0], frames[-1])
required_range = (instance.data["startFrame"],
instance.data["endFrame"])
required_range = (instance.data["frameStart"],
instance.data["frameEnd"])
if current_range != required_range:
raise ValueError("Invalid frame range: {0} - "

View file

@ -23,8 +23,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
match = re.match("(\w+)\.(\d+)\.vdb", file_name)
result = file_name
start_frame = instance.data.get("startFrame", None)
end_frame = instance.data.get("endFrame", None)
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
if match and start_frame is not None:

View file

@ -55,7 +55,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
if "startFrame" in data and "endFrame" in data:
if "frameStart" in data and "frameEnd" in data:
frames = "[{startFrame} - {endFrame}]".format(**data)
label = "{} {}".format(label, frames)
@ -91,8 +91,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
if node.evalParm("trange") == 0:
return data
data["startFrame"] = node.evalParm("f1")
data["endFrame"] = node.evalParm("f2")
data["frameStart"] = node.evalParm("f1")
data["frameEnd"] = node.evalParm("f2")
data["steps"] = node.evalParm("f3")
return data

View file

@ -6,10 +6,9 @@ import acre
from avalon import api, lib
import pype.api as pype
from pype.aport import lib as aportlib
from pype.api import Logger
log = Logger().get_logger(__name__, "aport")
log = pype.Logger().get_logger(__name__, "aport")
class Aport(api.Action):
@ -50,15 +49,16 @@ class Aport(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()
env.update(dict(os.environ))
try:
lib.launch(executable=executable,
args=arguments,
environment=env
)
lib.launch(
executable=executable,
args=arguments,
environment=env
)
except Exception as e:
log.error(e)
return

View file

@ -3,7 +3,7 @@ import sys
from pprint import pprint
import acre
from avalon import api, lib
from avalon import api, lib, io
import pype.api as pype
@ -44,12 +44,42 @@ class PremierePro(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
project_name = env.get("AVALON_PROJECT")
anatomy = Anatomy(project_name)
os.environ['AVALON_PROJECT'] = project_name
io.Session['AVALON_PROJECT'] = project_name
task_name = os.environ.get(
"AVALON_TASK", io.Session["AVALON_TASK"]
)
asset_name = os.environ.get(
"AVALON_ASSET", io.Session["AVALON_ASSET"]
)
application = lib.get_application(
os.environ["AVALON_APP_NAME"]
)
project_doc = io.find_one({"type": "project"})
data = {
"task": task_name,
"asset": asset_name,
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code", '')
},
"hierarchy": pype.get_hierarchy(),
"app": application["application_dir"]
}
anatomy_filled = anatomy.format(data)
workdir = anatomy_filled["work"]["folder"]
os.environ["AVALON_WORKDIR"] = workdir
env.update(dict(os.environ))
lib.launch(executable=executable,
args=arguments,
environment=env
)
lib.launch(
executable=executable,
args=arguments,
environment=env
)
return

View file

@ -13,8 +13,8 @@ class CreateVrayProxy(avalon.maya.Creator):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
self.data["animation"] = False
self.data["startFrame"] = 1
self.data["endFrame"] = 1
self.data["frameStart"] = 1
self.data["frameEnd"] = 1
# Write vertex colors
self.data["vertexColors"] = False

View file

@ -25,8 +25,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -59,8 +59,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -106,9 +106,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["asset"])
# Append start frame and end frame to label if present
if "startFrame" and "endFrame" in data:
label += " [{0}-{1}]".format(int(data["startFrame"]),
int(data["endFrame"]))
if "frameStart" and "frameEnd" in data:
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance.data["label"] = label

View file

@ -15,8 +15,8 @@ class CollectMayaAscii(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame
# make ftrack publishable
if instance.data.get('families'):

View file

@ -22,5 +22,5 @@ class CollectModelData(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame

View file

@ -64,9 +64,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
"subset": layername,
"setMembers": layer,
"publish": True,
"startFrame": self.get_render_attribute("startFrame",
"frameStart": self.get_render_attribute("frameStart",
layer=layer),
"endFrame": self.get_render_attribute("endFrame",
"frameEnd": self.get_render_attribute("frameEnd",
layer=layer),
"byFrameStep": self.get_render_attribute("byFrameStep",
layer=layer),
@ -106,8 +106,8 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
# Define nice label
label = "{0} ({1})".format(layername, data["asset"])
label += " [{0}-{1}]".format(int(data["startFrame"]),
int(data["endFrame"]))
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance = context.create_instance(layername)
instance.data["label"] = label

View file

@ -54,10 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('adding review family to {}'.format(reviewable_subset))
data['review_camera'] = camera
# data["publish"] = False
data['startFrameReview'] = instance.data['startFrame']
data['endFrameReview'] = instance.data['endFrame']
data['startFrame'] = instance.data['startFrame']
data['endFrame'] = instance.data['endFrame']
data['startFrameReview'] = instance.data["frameStart"]
data['endFrameReview'] = instance.data["frameEnd"]
data["frameStart"] = instance.data["frameStart"]
data["frameEnd"] = instance.data["frameEnd"]
data['handles'] = instance.data['handles']
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
@ -69,8 +69,8 @@ class CollectReview(pyblish.api.InstancePlugin):
else:
instance.data['subset'] = task + 'Review'
instance.data['review_camera'] = camera
instance.data['startFrameReview'] = instance.data['startFrame']
instance.data['endFrameReview'] = instance.data['endFrame']
instance.data['startFrameReview'] = instance.data["frameStart"]
instance.data['endFrameReview'] = instance.data["frameEnd"]
# make ftrack publishable
instance.data["families"] = ['ftrack']

View file

@ -82,8 +82,8 @@ class CollectVRayScene(pyblish.api.ContextPlugin):
"subset": subset,
"setMembers": layer,
"startFrame": start_frame,
"endFrame": end_frame,
"frameStart": start_frame,
"frameEnd": end_frame,
"renderer": "vray",
"resolution": resolution,
"ext": ".{}".format(extension),

View file

@ -45,8 +45,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["startFrame"] = 1
instance.data["endFrame"] = 1
instance.data["frameStart"] = 1
instance.data["frameEnd"] = 1
def collect_input_connections(self, instance):
"""Collect the inputs for all nodes in the input_SET"""

View file

@ -35,8 +35,8 @@ class ExtractAnimation(pype.api.Extractor):
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -23,8 +23,8 @@ class ExtractCameraAlembic(pype.api.Extractor):
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)

View file

@ -88,8 +88,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
def process(self, instance):
# get settings
framerange = [instance.data.get("startFrame", 1),
instance.data.get("endFrame", 1)]
framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)

View file

@ -166,8 +166,8 @@ class ExtractFBX(pype.api.Extractor):
self.log.info("Export options: {0}".format(options))
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -25,8 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
nodes = instance[:]
# Collect the start and end including handles
start = instance.data.get("startFrame", 1)
end = instance.data.get("endFrame", 1)
start = instance.data.get("frameStart", 1)
end = instance.data.get("frameEnd", 1)
handles = instance.data.get("handles", 0)
if handles:
start -= handles

View file

@ -114,9 +114,9 @@ class ExtractQuicktime(pype.api.Extractor):
'ext': 'mov',
'files': collected_frames,
"stagingDir": stagingdir,
'startFrame': start,
'endFrame': end,
'frameRate': fps,
"frameStart": start,
"frameEnd": end,
'fps': fps,
'preview': True,
'tags': ['review', 'delete']
}

View file

@ -28,14 +28,14 @@ class ExtractVRayProxy(pype.api.Extractor):
if not anim_on:
# Remove animation information because it is not required for
# non-animated subsets
instance.data.pop("startFrame", None)
instance.data.pop("endFrame", None)
instance.data.pop("frameStart", None)
instance.data.pop("frameEnd", None)
start_frame = 1
end_frame = 1
else:
start_frame = instance.data["startFrame"]
end_frame = instance.data["endFrame"]
start_frame = instance.data["frameStart"]
end_frame = instance.data["frameEnd"]
vertex_colors = instance.data.get("vertexColors", False)

View file

@ -31,8 +31,8 @@ class ExtractYetiCache(pype.api.Extractor):
data_file = os.path.join(dirname, "yeti.fursettings")
# Collect information for writing cache
start_frame = instance.data.get("startFrame")
end_frame = instance.data.get("endFrame")
start_frame = instance.data.get("frameStart")
end_frame = instance.data.get("frameEnd")
preroll = instance.data.get("preroll")
if preroll > 0:
start_frame -= preroll

View file

@ -182,8 +182,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"),
"Frames": "{start}-{end}x{step}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"]),
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
),
@ -330,7 +330,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame", "byFrameStep"):
for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:

View file

@ -389,8 +389,8 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
},
"frames_range": {
"value": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])),
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])),
"state": True,
"subst": False
},
@ -539,7 +539,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame", "byFrameStep"):
for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:

View file

@ -51,8 +51,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
filename,
vrscene_output)
start_frame = int(instance.data["startFrame"])
end_frame = int(instance.data["endFrame"])
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
# Primary job
self.log.info("Submitting export job ..")
@ -123,8 +123,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
self.log.info("Submitting render job ..")
start_frame = int(instance.data["startFrame"])
end_frame = int(instance.data["endFrame"])
start_frame = int(instance.data["frameStart"])
end_frame = int(instance.data["frameEnd"])
ext = instance.data.get("ext", "exr")
# Create output directory for renders
@ -215,8 +215,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
return cmd.format(project=instance.context.data["workspaceDir"],
cam=cammera,
startFrame=instance.data["startFrame"],
endFrame=instance.data["endFrame"],
startFrame=instance.data["frameStart"],
endFrame=instance.data["frameEnd"],
layer=instance.name)
def build_jobinfo_environment(self, env):
@ -266,7 +266,7 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
if dir:
return output_path.replace("\\", "/")
start_frame = int(instance.data["startFrame"])
start_frame = int(instance.data["frameStart"])
filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)
result = filename_zero.replace("\\", "/")

View file

@ -25,8 +25,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
def process(self, instance):
start = instance.data.get("startFrame", None)
end = instance.data.get("endFrame", None)
start = instance.data.get("frameStart", None)
end = instance.data.get("frameEnd", None)
handles = instance.data.get("handles", None)
# Check if any of the values are present

View file

@ -51,8 +51,8 @@ class ValidateInstancerFrameRanges(pyblish.api.InstancePlugin):
import pyseq
start_frame = instance.data.get("startFrame", 0)
end_frame = instance.data.get("endFrame", 0)
start_frame = instance.data.get("frameStart", 0)
end_frame = instance.data.get("frameEnd", 0)
required = range(int(start_frame), int(end_frame) + 1)
invalid = list()

View file

@ -21,7 +21,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
angularunits = context.data('angularUnits')
fps = context.data['fps']
asset_fps = lib.get_asset_fps()
asset_fps = lib.get_asset()["data"]["fps"]
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))
@ -50,5 +50,5 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
cls.log.debug(current_linear)
cls.log.info("Setting time unit to match project")
asset_fps = lib.get_asset_fps()
asset_fps = lib.get_asset()["data"]["fps"]
mayalib.set_scene_fps(asset_fps)

View file

@ -23,5 +23,5 @@ class ValidateVrayProxy(pyblish.api.InstancePlugin):
cls.log.error("'%s' is empty! This is a bug" % instance.name)
if data["animation"]:
if data["endFrame"] < data["startFrame"]:
if data["frameEnd"] < data["frameStart"]:
cls.log.error("End frame is smaller than start frame")

View file

@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}

View file

@ -78,8 +78,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
"Comment": comment,

View file

@ -19,10 +19,10 @@ class ValidateSettingsNuke(pyblish.api.Validator):
asset = io.find_one({"name": os.environ['AVALON_ASSET']})
try:
avalon_resolution = asset["data"].get("resolution", '')
avalon_pixel_aspect = asset["data"].get("pixel_aspect", '')
avalon_pixel_aspect = asset["data"].get("pixelAspect", '')
avalon_fps = asset["data"].get("fps", '')
avalon_first = asset["data"].get("edit_in", '')
avalon_last = asset["data"].get("edit_out", '')
avalon_first = asset["data"].get("frameStart", '')
avalon_last = asset["data"].get("frameEnd", '')
avalon_crop = asset["data"].get("crop", '')
except KeyError:
print(

View file

@ -30,8 +30,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
log.info("start: {}, end: {}".format(start, end))
if start is None or end is None:
@ -64,8 +64,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
start = version_data.get("startFrame", None)
end = version_data.get("endFrame", None)
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "

View file

@ -92,8 +92,8 @@ class LoadMov(api.Loader):
version = context['version']
version_data = version.get("data", {})
orig_first = version_data.get("startFrame", None)
orig_last = version_data.get("endFrame", None)
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@ -141,7 +141,7 @@ class LoadMov(api.Loader):
read_node["frame"].setValue(str(offset_frame))
# add additional metadata from the version to imprint to Avalon knob
add_keys = [
"startFrame", "endFrame", "handles", "source", "author",
"frameStart", "frameEnd", "handles", "source", "author",
"fps", "version", "handleStart", "handleEnd"
]
@ -207,8 +207,8 @@ class LoadMov(api.Loader):
version_data = version.get("data", {})
orig_first = version_data.get("startFrame", None)
orig_last = version_data.get("endFrame", None)
orig_first = version_data.get("frameStart", None)
orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@ -250,8 +250,8 @@ class LoadMov(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"startFrame": version_data.get("startFrame"),
"endFrame": version_data.get("endFrame"),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"source": version_data.get("source"),
"handles": version_data.get("handles"),

View file

@ -27,8 +27,8 @@ class LinkAsGroup(api.Loader):
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
# Fallback to asset name when namespace is None
if namespace is None:
@ -41,17 +41,14 @@ class LinkAsGroup(api.Loader):
self.log.info("versionData: {}\n".format(context["version"]["data"]))
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("startFrame", None)
self.log.info("start: {}\n".format(start))
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handle_start", "handle_end", "source", "author", "fps"]
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", "source", "author", "fps"]
data_imprint = {
"start_frame": start,
"fstart": first,
"fend": last,
"start_frame": first,
"frameStart": first,
"frameEnd": last,
"version": vname
}
for k in add_keys:
@ -138,7 +135,7 @@ class LinkAsGroup(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"endFrame": version["data"].get("endFrame"),
"frameEnd": version["data"].get("frameEnd"),
"version": version.get("name"),
"colorspace": version["data"].get("colorspace"),
"source": version["data"].get("source"),

View file

@ -92,8 +92,8 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
@ -136,7 +136,7 @@ class LoadSequence(api.Loader):
r["last"].setValue(int(last))
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["startFrame", "endFrame", "handles",
add_keys = ["frameStart", "frameEnd", "handles",
"source", "colorspace", "author", "fps", "version",
"handleStart", "handleEnd"]
@ -198,8 +198,8 @@ class LoadSequence(api.Loader):
version_data = version.get("data", {})
first = version_data.get("startFrame", None)
last = version_data.get("endFrame", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
handles = version_data.get("handles", 0)
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
@ -234,8 +234,8 @@ class LoadSequence(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"startFrame": version_data.get("startFrame"),
"endFrame": version_data.get("endFrame"),
"frameStart": version_data.get("frameStart"),
"frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),

View file

@ -14,5 +14,4 @@ class CollectFramerate(pyblish.api.ContextPlugin):
]
def process(self, context):
context.data["framerate"] = nuke.root()["fps"].getValue()
context.data["fps"] = nuke.root()["fps"].getValue()

View file

@ -99,8 +99,8 @@ class CollectNukeReads(pyblish.api.ContextPlugin):
"stagingDir": source_dir,
"ext": ext,
"label": label,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"colorspace": node["colorspace"].value(),
"handles": int(asset_data["data"].get("handles", 0)),
"step": 1,

View file

@ -38,8 +38,8 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
handle_start = int(knob_data.get("handle_start", 0))
handle_end = int(knob_data.get("handle_end", 0))
handle_start = int(knob_data.get("handleStart", 0))
handle_end = int(knob_data.get("handleEnd", 0))
# Get format
format = root['format'].value()
@ -54,17 +54,17 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
"startFrame": first_frame + handle_start,
"endFrame": last_frame - handle_end,
"resolution_width": resolution_width,
"resolution_height": resolution_height,
"pixel_aspect": pixel_aspect,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
# backward compatibility
"handles": handle_start,
"handle_start": handle_start,
"handle_end": handle_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"step": 1,
"fps": root['fps'].value(),
}

View file

@ -91,8 +91,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"ext": ext,
"label": label,
"handles": handles,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"outputType": output_type,
"colorspace": node["colorspace"].value(),
})

View file

@ -24,8 +24,8 @@ class NukeRenderLocal(pype.api.Extractor):
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("startFrame", None)
last_frame = instance.data.get("endFrame", None)
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
self.log.info("Starting render")

View file

@ -67,8 +67,8 @@ class ExtractReviewData(pype.api.Extractor):
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("startFrame", None)
last_frame = instance.data.get("endFrame", None)
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
node = previous_node = nuke.createNode("Read")
@ -149,8 +149,8 @@ class ExtractReviewData(pype.api.Extractor):
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"startFrame": first_frame,
"endFrame": last_frame,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}

View file

@ -77,8 +77,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
"Comment": comment,
@ -199,7 +199,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame"):
for key in ("frameStart", "frameEnd"):
value = instance.data[key]
if int(value) == value:

View file

@ -51,7 +51,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
collection = collections[0]
frame_length = int(
instance.data["endFrame"] - instance.data["startFrame"] + 1
instance.data["frameEnd"] - instance.data["frameStart"] + 1
)
if frame_length != 1:

View file

@ -1,5 +1,6 @@
import pyblish.api
from avalon import io
from pype import lib
@pyblish.api.log
@ -15,31 +16,33 @@ class ValidateScript(pyblish.api.InstancePlugin):
ctx_data = instance.context.data
asset_name = ctx_data["asset"]
asset = io.find_one({
"type": "asset",
"name": asset_name
})
# asset = io.find_one({
# "type": "asset",
# "name": asset_name
# })
asset = lib.get_asset(asset_name)
asset_data = asset["data"]
# These attributes will be checked
attributes = [
"fps",
"fstart",
"fend",
"resolution_width",
"resolution_height",
"handle_start",
"handle_end"
"frameStart",
"frameEnd",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd"
]
# Value of these attributes can be found on parents
hierarchical_attributes = [
"fps",
"resolution_width",
"resolution_height",
"pixel_aspect",
"handle_start",
"handle_end"
"resolutionWidth",
"resolutionHeight",
"pixelAspect",
"handleStart",
"handleEnd"
]
missing_attributes = []
@ -72,25 +75,25 @@ class ValidateScript(pyblish.api.InstancePlugin):
# Get handles from database, Default is 0 (if not found)
handle_start = 0
handle_end = 0
if "handle_start" in asset_attributes:
handle_start = asset_attributes["handle_start"]
if "handle_end" in asset_attributes:
handle_end = asset_attributes["handle_end"]
if "handleStart" in asset_attributes:
handle_start = asset_attributes["handleStart"]
if "handleEnd" in asset_attributes:
handle_end = asset_attributes["handleEnd"]
# Set frame range with handles
# asset_attributes["fstart"] -= handle_start
# asset_attributes["fend"] += handle_end
# asset_attributes["frameStart"] -= handle_start
# asset_attributes["frameEnd"] += handle_end
# Get values from nukescript
script_attributes = {
"handle_start": ctx_data["handle_start"],
"handle_end": ctx_data["handle_end"],
"handleStart": ctx_data["handleStart"],
"handleEnd": ctx_data["handleEnd"],
"fps": ctx_data["fps"],
"fstart": ctx_data["startFrame"],
"fend": ctx_data["endFrame"],
"resolution_width": ctx_data["resolution_width"],
"resolution_height": ctx_data["resolution_height"],
"pixel_aspect": ctx_data["pixel_aspect"]
"frameStart": ctx_data["frameStart"],
"frameEnd": ctx_data["frameEnd"],
"resolutionWidth": ctx_data["resolutionWidth"],
"resolutionHeight": ctx_data["resolutionHeight"],
"pixelAspect": ctx_data["pixelAspect"]
}
# Compare asset's values Nukescript X Database
@ -107,7 +110,7 @@ class ValidateScript(pyblish.api.InstancePlugin):
msg = "Attributes '{}' are not set correctly"
# Alert user that handles are set if Frame start/end not match
if (
(("fstart" in not_matching) or ("fend" in not_matching)) and
(("frameStart" in not_matching) or ("frameEnd" in not_matching)) and
((handle_start > 0) or (handle_end > 0))
):
msg += " (`handle_start` are set to {})".format(handle_start)

View file

@ -74,8 +74,8 @@ class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin):
def check_bounding_box(self, instance):
node = instance[0]
first_frame = instance.data["startFrame"]
last_frame = instance.data["endFrame"]
first_frame = instance.data["frameStart"]
last_frame = instance.data["frameEnd"]
format_width = node.format().width()
format_height = node.format().height()

View file

@ -1,191 +0,0 @@
from pyblish import api
class CollectFramerate(api.ContextPlugin):
"""Collect framerate from selected sequence."""
order = api.CollectorOrder
label = "Collect Framerate"
hosts = ["nukestudio"]
def process(self, context):
for item in context.data.get("selection", []):
context.data["framerate"] = item.sequence().framerate().toFloat()
return
class CollectTrackItems(api.ContextPlugin):
"""Collect all tasks from submission."""
order = api.CollectorOrder
label = "Collect Track Items"
hosts = ["nukestudio"]
def process(self, context):
import os
submission = context.data.get("submission", None)
data = {}
# Set handles
handles = 0
if submission:
for task in submission.getLeafTasks():
if task._cutHandles:
handles = task._cutHandles
self.log.info("__ handles: '{}'".format(handles))
# Skip audio track items
media_type = "core.Hiero.Python.TrackItem.MediaType.kAudio"
if str(task._item.mediaType()) == media_type:
continue
item = task._item
if item.name() not in data:
data[item.name()] = {"item": item, "tasks": [task]}
else:
data[item.name()]["tasks"].append(task)
data[item.name()]["startFrame"] = task.outputRange()[0]
data[item.name()]["endFrame"] = task.outputRange()[1]
else:
for item in context.data.get("selection", []):
# Skip audio track items
# Try/Except is to handle items types, like EffectTrackItem
try:
media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
if str(item.mediaType()) != media_type:
continue
except:
continue
data[item.name()] = {
"item": item,
"tasks": [],
"startFrame": item.timelineIn(),
"endFrame": item.timelineOut()
}
for key, value in data.items():
context.create_instance(
name=key,
subset="trackItem",
asset=value["item"].name(),
item=value["item"],
family="trackItem",
tasks=value["tasks"],
startFrame=value["startFrame"] + handles,
endFrame=value["endFrame"] - handles,
handles=handles
)
context.create_instance(
name=key + "_review",
subset="reviewItem",
asset=value["item"].name(),
item=value["item"],
family="trackItem_review",
families=["output"],
handles=handles,
output_path=os.path.abspath(
os.path.join(
context.data["activeProject"].path(),
"..",
"workspace",
key + ".mov"
)
)
)
class CollectTasks(api.ContextPlugin):
"""Collect all tasks from submission."""
order = api.CollectorOrder + 0.01
label = "Collect Tasks"
hosts = ["nukestudio"]
def process(self, context):
import os
import re
import hiero.exporters as he
import clique
for parent in context:
if "trackItem" != parent.data["family"]:
continue
for task in parent.data["tasks"]:
asset_type = None
hiero_cls = he.FnSymLinkExporter.SymLinkExporter
if isinstance(task, hiero_cls):
asset_type = "img"
movie_formats = [".mov", ".R3D"]
ext = os.path.splitext(task.resolvedExportPath())[1]
if ext in movie_formats:
asset_type = "mov"
hiero_cls = he.FnTranscodeExporter.TranscodeExporter
if isinstance(task, hiero_cls):
asset_type = "img"
if task.resolvedExportPath().endswith(".mov"):
asset_type = "mov"
hiero_cls = he.FnNukeShotExporter.NukeShotExporter
if isinstance(task, hiero_cls):
asset_type = "scene"
hiero_cls = he.FnAudioExportTask.AudioExportTask
if isinstance(task, hiero_cls):
asset_type = "audio"
# Skip all non supported export types
if not asset_type:
continue
resolved_path = task.resolvedExportPath()
# Formatting the basename to not include frame padding or
# extension.
name = os.path.splitext(os.path.basename(resolved_path))[0]
name = name.replace(".", "")
name = name.replace("#", "")
name = re.sub(r"%.*d", "", name)
instance = context.create_instance(name=name, parent=parent)
instance.data["task"] = task
instance.data["item"] = parent.data["item"]
instance.data["family"] = "trackItem.task"
instance.data["families"] = [asset_type, "local", "task"]
label = "{1}/{0} - {2} - local".format(
name, parent, asset_type
)
instance.data["label"] = label
instance.data["handles"] = parent.data["handles"]
# Add collection or output
if asset_type == "img":
collection = None
if "#" in resolved_path:
head = resolved_path.split("#")[0]
padding = resolved_path.count("#")
tail = resolved_path.split("#")[-1]
collection = clique.Collection(
head=head, padding=padding, tail=tail
)
if "%" in resolved_path:
collection = clique.parse(
resolved_path, pattern="{head}{padding}{tail}"
)
instance.data["collection"] = collection
else:
instance.data["output_path"] = resolved_path

View file

@ -1,215 +0,0 @@
from pyblish import api
import pype
class CollectPlates(api.InstancePlugin):
"""Collect plates"""
order = api.CollectorOrder + 0.49
label = "Collect Plates"
hosts = ["nukestudio"]
families = ["plate"]
def process(self, instance):
import os
# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
context = instance.context
anatomy = context.data.get("anatomy", None)
padding = int(anatomy.templates['render']['padding'])
name = instance.data["subset"]
asset = instance.data["asset"]
track = instance.data["track"]
family = instance.data["family"]
families = instance.data["families"]
version = instance.data["version"]
source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
# staging dir creation
staging_dir = os.path.dirname(
source_path)
item = instance.data["item"]
# get handles
handles = int(instance.data["handles"])
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
# get source frames
source_in = int(instance.data["sourceIn"])
source_out = int(instance.data["sourceOut"])
# get source frames
frame_start = int(instance.data["startFrame"])
frame_end = int(instance.data["endFrame"])
# get source frames
source_in_h = int(instance.data["sourceInH"])
source_out_h = int(instance.data["sourceOutH"])
# get timeline frames
timeline_in = int(instance.data["timelineIn"])
timeline_out = int(instance.data["timelineOut"])
# frame-ranges with handles
timeline_frame_start = int(instance.data["timelineInHandles"])
timeline_frame_end = int(instance.data["timelineOutHandles"])
# get colorspace
colorspace = item.sourceMediaColourTransform()
# get sequence from context, and fps
fps = float(str(instance.data["fps"]))
# test output
self.log.debug("__ handles: {}".format(handles))
self.log.debug("__ handle_start: {}".format(handle_start))
self.log.debug("__ handle_end: {}".format(handle_end))
self.log.debug("__ frame_start: {}".format(frame_start))
self.log.debug("__ frame_end: {}".format(frame_end))
self.log.debug("__ f duration: {}".format(frame_end - frame_start + 1))
self.log.debug("__ source_in: {}".format(source_in))
self.log.debug("__ source_out: {}".format(source_out))
self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
self.log.debug("__ source_in_h: {}".format(source_in_h))
self.log.debug("__ source_out_h: {}".format(source_out_h))
self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
self.log.debug("__ timeline_in: {}".format(timeline_in))
self.log.debug("__ timeline_out: {}".format(timeline_out))
self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
self.log.debug("__ timeline_frame_start: {}".format(
timeline_frame_start))
self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
self.log.debug("__ colorspace: {}".format(colorspace))
self.log.debug("__ track: {}".format(track))
self.log.debug("__ fps: {}".format(fps))
self.log.debug("__ source_file: {}".format(source_file))
self.log.debug("__ staging_dir: {}".format(staging_dir))
self.log.debug("__ before family: {}".format(family))
self.log.debug("__ before families: {}".format(families))
#
# this is just workaround because 'clip' family is filtered
instance.data["family"] = families[-1]
instance.data["families"].append(family)
# add to data of representation
version_data.update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end,
"sourceIn": source_in,
"sourceOut": source_out,
"startFrame": frame_start,
"endFrame": frame_end,
"timelineIn": timeline_in,
"timelineOut": timeline_out,
"timelineInHandles": timeline_frame_start,
"timelineOutHandles": timeline_frame_end,
"fps": fps,
"colorspace": colorspace,
"families": [f for f in families if 'ftrack' not in f],
"asset": asset,
"subset": name,
"track": track,
"version": int(version)
})
instance.data["versionData"] = version_data
try:
basename, ext = os.path.splitext(source_file)
head, padding = os.path.splitext(basename)
ext = ext[1:]
padding = padding[1:]
# head, padding, ext = source_file.split('.')
source_first_frame = int(padding)
padding = len(padding)
file = "{head}.%0{padding}d.{ext}".format(
head=head,
padding=padding,
ext=ext
)
start_frame = source_first_frame
end_frame = source_first_frame + source_out
files = [file % i for i in range(
(source_first_frame + source_in_h),
((source_first_frame + source_out_h) + 1), 1)]
except Exception as e:
self.log.debug("Exception in file: {}".format(e))
head, ext = os.path.splitext(source_file)
ext = ext[1:]
files = source_file
start_frame = source_in_h
end_frame = source_out_h
mov_file = head + ".mov"
mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
if os.path.exists(mov_path):
# adding mov into the representations
self.log.debug("__ mov_path: {}".format(mov_path))
plates_mov_representation = {
'files': mov_file,
'stagingDir': staging_dir,
'startFrame': 0,
'endFrame': source_out - source_in + 1,
'step': 1,
'frameRate': fps,
'preview': True,
'thumbnail': False,
'name': "preview",
'ext': "mov",
}
instance.data["representations"].append(
plates_mov_representation)
thumb_file = head + ".png"
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
thumbnail = item.thumbnail(source_in).save(
thumb_path,
format='png'
)
self.log.debug("__ thumbnail: {}".format(thumbnail))
thumb_representation = {
'files': thumb_file,
'stagingDir': staging_dir,
'name': "thumbnail",
'thumbnail': True,
'ext': "png"
}
instance.data["representations"].append(
thumb_representation)
# adding representation for plates
plates_representation = {
'files': files,
'stagingDir': staging_dir,
'name': ext,
'ext': ext,
'startFrame': start_frame,
'endFrame': end_frame,
}
instance.data["representations"].append(plates_representation)
# testing families
family = instance.data["family"]
families = instance.data["families"]
# test prints version_data
self.log.debug("__ version_data: {}".format(version_data))
self.log.debug("__ plates_representation: {}".format(
plates_representation))
self.log.debug("__ after family: {}".format(family))
self.log.debug("__ after families: {}".format(families))
# # this will do FnNsFrameServer
# FnNsFrameServer.renderFrames(*_args)

View file

@ -1,140 +0,0 @@
import os
import subprocess
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pype.api
from pype.vendor import ffmpeg
class ExtractPlate(pype.api.Extractor):
    """Extract plate cut to the timeline.

    Only supporting mov plates for now. Image sequences already get cut down
    to timeline range.

    Produces two representations on the instance:
        - the cut plate mov (with sequence audio muxed back in)
        - a png thumbnail taken at the cut's first source frame
    """

    label = "Plate"
    hosts = ["nukestudio"]
    families = ["plate"]
    optional = True

    def process(self, instance):
        # Only *.mov sources are handled here; other formats are skipped.
        if not instance.data["sourcePath"].endswith(".mov"):
            self.log.debug(
                "Skipping {} because its not a \"*.mov\" "
                "format.".format(instance)
            )
            return

        staging_dir = self.staging_dir(instance)
        filename = "{0}_without_sound".format(instance.name) + ".mov"
        output_path = os.path.join(staging_dir, filename)
        input_path = instance.data["sourcePath"]

        self.log.info("Outputting movie to %s" % output_path)

        # Cut plate to timeline range, extended by handles on both sides.
        item = instance.data["item"]
        start_frame = item.mapTimelineToSource(
            item.timelineIn() - (
                instance.data["handleStart"]
            )
        )
        end_frame = item.mapTimelineToSource(
            item.timelineOut() + (
                instance.data["handleEnd"]
            )
        )
        framerate = item.sequence().framerate().toFloat()

        # Stream-copy (no re-encode): seek to the cut start in seconds and
        # copy the exact frame count.
        output_options = {
            "vcodec": "copy",
            "ss": start_frame / framerate,
            "frames": int(end_frame - start_frame) + 1
        }

        try:
            (
                ffmpeg
                .input(input_path)
                .output(output_path, **output_options)
                .run(overwrite_output=True,
                     capture_stdout=True,
                     capture_stderr=True)
            )
        except ffmpeg.Error as e:
            ffmpeg_error = "ffmpeg error: {}".format(e.stderr)
            self.log.error(ffmpeg_error)
            raise RuntimeError(ffmpeg_error)

        # Extract audio from the sequence for the same timeline range.
        filename = "{0}".format(instance.name) + ".wav"
        audio_path = os.path.join(staging_dir, filename)
        writeSequenceAudioWithHandles(
            audio_path,
            item.sequence(),
            item.timelineIn(),
            item.timelineOut(),
            0,
            0
        )

        # Mux the extracted audio back onto the cut plate; video stream is
        # copied untouched.
        input_path = output_path
        filename = "{0}_with_sound".format(instance.name) + ".mov"
        output_path = os.path.join(staging_dir, filename)
        args = [
            "ffmpeg",
            "-i", input_path,
            "-i", audio_path,
            "-vcodec", "copy",
            output_path
        ]
        self.log.debug(subprocess.list2cmdline(args))

        p = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            cwd=os.path.dirname(args[-1])
        )
        output = p.communicate()[0]
        if p.returncode != 0:
            raise ValueError(output)
        self.log.debug(output)

        # Adding representation.
        ext = os.path.splitext(output_path)[1][1:]
        representation = {
            "files": os.path.basename(output_path),
            # FIX: key was "staging_dir"; the representation schema used
            # everywhere else (including the thumbnail below) is "stagingDir".
            "stagingDir": staging_dir,
            "startFrame": 0,
            "endFrame": end_frame - start_frame,
            "step": 1,
            "frameRate": framerate,
            "thumbnail": False,
            "name": ext,
            "ext": ext
        }
        instance.data["representations"] = [representation]
        self.log.debug("Adding representation: {}".format(representation))

        # Adding thumbnail representation, rendered next to the source file
        # only if it does not already exist.
        path = instance.data["sourcePath"].replace(".mov", ".png")
        if not os.path.exists(path):
            item.thumbnail(start_frame).save(path, format="png")
        representation = {
            "files": os.path.basename(path),
            "stagingDir": os.path.dirname(path),
            "name": "thumbnail",
            "thumbnail": True,
            "ext": "png"
        }
        instance.data["representations"].append(representation)
        self.log.debug("Adding representation: {}".format(representation))

View file

@ -1,238 +0,0 @@
from pyblish import api
import pype
class ExtractPlates(pype.api.Extractor):
    """Extracts plates.

    Builds a standalone Nuke script for the track item (root node, the
    item's node graph, and a Write node), writes it to the staging dir,
    and queues its render arguments on
    ``context.data["frameServerRenderQueue"]`` for the Nuke frame server.
    Registers "nk" (the script) and image-sequence representations on the
    instance.
    """

    order = api.ExtractorOrder
    label = "Extract Plates"
    hosts = ["nukestudio"]
    families = ["encode"]

    def process(self, instance):
        import os
        import hiero.core
        # from hiero.ui.nuke_bridge import FnNsFrameServer

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        version_data = dict()

        context = instance.context
        anatomy = context.data.get("anatomy", None)
        # frame-number padding for rendered file names, taken from the
        # project anatomy "render" template
        padding = int(anatomy.templates['render']['padding'])

        name = instance.data["subset"]
        asset = instance.data["asset"]
        track = instance.data["track"]
        family = instance.data["family"]
        families = instance.data["families"]
        attrs = instance.data["attributes"]
        version = instance.data["version"]

        # staging dir creation
        self.log.debug("creating staging dir")
        self.staging_dir(instance)

        staging_dir = instance.data['stagingDir']

        # script writer that accumulates nodes and serializes them to disk
        Nuke_writer = hiero.core.nuke.ScriptWriter()

        item = instance.data["item"]

        # get handles
        handles = int(instance.data["handles"])
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])

        # get timeline frames
        timeline_in = int(instance.data["timelineIn"])
        timeline_out = int(instance.data["timelineOut"])

        # frame-ranges with handles
        timeline_frame_start = int(instance.data["timelineInHandles"])
        timeline_frame_end = int(instance.data["timelineOutHandles"])

        # creating comp frame range
        frame_start = int(instance.data["startFrame"])
        frame_end = int(instance.data["endFrame"])

        # get colorspace
        colorspace = instance.context.data["colorspace"]

        # get sequence from context, and fps
        fps = int(instance.data["fps"])

        # test output
        self.log.debug("__ handles: {}".format(handles))
        self.log.debug("__ handle_start: {}".format(handle_start))
        self.log.debug("__ handle_end: {}".format(handle_end))
        self.log.debug("__ timeline_in: {}".format(timeline_in))
        self.log.debug("__ timeline_out: {}".format(timeline_out))
        self.log.debug("__ timeline_frame_start: {}".format(
            timeline_frame_start))
        self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
        self.log.debug("__ frame_start: {}".format(frame_start))
        self.log.debug("__ frame_end: {}".format(frame_end))
        self.log.debug("__ colorspace: {}".format(colorspace))
        self.log.debug("__ track: {}".format(track))
        self.log.debug("__ fps: {}".format(fps))

        # Generate Nuke script
        write_name = "Write_out"

        # root node: defines the comp frame range and fps; must be added to
        # the script before the item's own nodes
        root_node = hiero.core.nuke.RootNode(
            frame_start,
            frame_end,
            fps=fps
        )
        root_node.addProjectSettings(colorspace)

        # create write node and link it to root node
        Nuke_writer.addNode(root_node)

        '''TrackItem.addToNukeScript(script=, firstFrame=None, additionalNodes=[], additionalNodesCallback=None, includeRetimes=False, retimeMethod=None, startHandle=None, endHandle=None, colourTransform=None, offset=0, nodeLabel=None, includeAnnotations=False, includeEffects=True, outputToSequenceFormat=False)'''
        # serialize the track item's node graph into the script; retime,
        # effect and annotation options come from the instance attributes
        item.addToNukeScript(
            script=Nuke_writer,
            firstFrame=frame_start,
            includeRetimes=attrs["includeRetimes"],
            retimeMethod=attrs["retimeMethod"],
            startHandle=handle_start,
            endHandle=handle_end,
            includeEffects=attrs["includeEffects"],
            includeAnnotations=attrs["includeAnnotations"]
        )

        # knobs for the Write node, preconfigured on the instance
        write_knobs = attrs["nodes"]["write"]["attributes"]

        # TODO: take template from anatomy
        nukescript_file = "{asset}_{name}_v{version}.{ext}".format(
            asset=asset,
            name=name,
            version=version,
            ext="nk"
        )
        nukescript_path = os.path.join(
            staging_dir, nukescript_file
        )

        # TODO: take template from anatomy
        output_file = "{asset}_{name}_v{version}.%0{padding}d.{ext}".format(
            asset=asset,
            name=name,
            version=version,
            padding=padding,
            ext=write_knobs["file_type"]
        )
        output_path = os.path.join(
            staging_dir, output_file
        )

        # Nuke requires forward slashes in file paths
        write_node = hiero.core.nuke.WriteNode(output_path.replace("\\", "/"))
        write_node.setKnob("name", write_name)
        write_node.setKnob("file_type", write_knobs["file_type"])
        for knob, value in write_knobs.items():
            write_node.setKnob(knob, value)
        Nuke_writer.addNode(write_node)

        Nuke_writer.writeToDisk(nukescript_path)

        # test prints
        self.log.debug("__ output_file: {}".format(output_file))
        self.log.debug("__ output_path: {}".format(output_path))
        self.log.debug("__ nukescript_file: {}".format(nukescript_file))
        self.log.debug("__ nukescript_path: {}".format(nukescript_path))
        self.log.debug("__ write_knobs: {}".format(write_knobs))
        self.log.debug("__ write_name: {}".format(write_name))
        self.log.debug("__ Nuke_writer: {}".format(Nuke_writer))

        # create rendering arguments for FnNsFrameServer
        _args = [
            nukescript_path,
            "{}-{}".format(frame_start, frame_end),
            write_name,
            ["main"]
        ]

        # add to data of representation
        version_data.update({
            "handles": handles,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "timelineIn": timeline_in,
            "timelineOut": timeline_out,
            "timelineInHandles": timeline_frame_start,
            "timelineOutHandles": timeline_frame_end,
            "compFrameIn": frame_start,
            "compFrameOut": frame_end,
            "fps": fps,
            "colorspace": write_knobs["colorspace"],
            "nukeScriptFileName": nukescript_file,
            "nukeWriteFileName": output_file,
            "nukeWriteName": write_name,
            "FnNsFrameServer_renderFrames_args": str(_args),
            "family": family,
            "families": families,
            "asset": asset,
            "subset": name,
            "track": track,
            "version": int(version)
        })
        instance.data["versionData"] = version_data

        # adding representation for nukescript
        nk_representation = {
            'files': nukescript_file,
            'stagingDir': staging_dir,
            'name': "nk",
            'ext': "nk",
        }
        instance.data["representations"].append(nk_representation)

        # adding representation for plates; one file name per frame expanded
        # from the %0Nd pattern, inclusive of frame_end
        plates_representation = {
            'files': [output_file % i for i in range(
                frame_start, (frame_end + 1), 1)],
            'stagingDir': staging_dir,
            'name': write_knobs["file_type"],
            'ext': write_knobs["file_type"],
        }
        instance.data["representations"].append(plates_representation)

        # adding checking file to context for ExtractPlateCheck(context) plugin
        context.data["platesCheck"] = os.path.join(
            staging_dir, output_file % frame_end
        )

        if not context.data.get("frameServerRenderQueue"):
            context.data["frameServerRenderQueue"] = list()

        # add to render queue list
        context.data["frameServerRenderQueue"].append(_args)

        # test prints
        self.log.debug("__ before family: {}".format(family))
        self.log.debug("__ before families: {}".format(families))

        # this is just workaround because 'clip' family is filtered
        instance.data["family"] = families[-1]
        instance.data["families"].append(family)

        # testing families
        family = instance.data["family"]
        families = instance.data["families"]

        # test prints version_data
        self.log.debug("__ version_data: {}".format(version_data))
        self.log.debug("__ nk_representation: {}".format(nk_representation))
        self.log.debug("__ plates_representation: {}".format(
            plates_representation))
        self.log.debug("__ after family: {}".format(family))
        self.log.debug("__ after families: {}".format(families))

        # # this will do FnNsFrameServer
        # FnNsFrameServer.renderFrames(*_args)

View file

@ -1,138 +0,0 @@
import os
import subprocess
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pype.api
class ExtractReview(pype.api.Extractor):
    """Extract Quicktime with optimized codec for reviewing.

    Cuts the source movie to the item's timeline range, re-encodes it to a
    playback-friendly yuv420p mov, muxes the sequence audio back in, and
    registers "preview" and "thumbnail" representations on the instance.
    """

    label = "Review"
    hosts = ["nukestudio"]
    families = ["review"]
    optional = True

    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        filename = "{0}_without_sound".format(instance.name) + ".mov"
        output_path = os.path.join(staging_dir, filename)
        input_path = instance.data["sourcePath"]
        item = instance.data["item"]

        # Has to be yuv420p for compatibility with older players and smooth
        # playback. This does come with a sacrifice of more visible banding
        # issues.
        # The cut range here is the bare timeline in/out (no handles).
        start_frame = item.mapTimelineToSource(item.timelineIn())
        end_frame = item.mapTimelineToSource(item.timelineOut())
        args = [
            "ffmpeg",
            "-ss", str(start_frame / item.sequence().framerate().toFloat()),
            "-i", input_path,
            "-pix_fmt", "yuv420p",
            "-crf", "18",
            "-timecode", "00:00:00:01",
            "-vf", "scale=trunc(iw/2)*2:trunc(ih/2)*2",
            "-frames", str(int(end_frame - start_frame) + 1),
            output_path
        ]
        self.log.debug(subprocess.list2cmdline(args))

        p = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            cwd=os.path.dirname(args[-1])
        )
        output = p.communicate()[0]
        if p.returncode != 0:
            raise ValueError(output)
        self.log.debug(output)

        # Extract audio from the sequence for the same timeline range.
        filename = "{0}".format(instance.name) + ".wav"
        audio_path = os.path.join(staging_dir, filename)
        writeSequenceAudioWithHandles(
            audio_path,
            item.sequence(),
            item.timelineIn(),
            item.timelineOut(),
            0,
            0
        )

        # Mux the extracted audio back onto the review movie; video stream
        # is copied untouched.
        input_path = output_path
        filename = "{0}_with_sound".format(instance.name) + ".mov"
        output_path = os.path.join(staging_dir, filename)
        args = [
            "ffmpeg",
            "-i", input_path,
            "-i", audio_path,
            "-vcodec", "copy",
            output_path
        ]
        self.log.debug(subprocess.list2cmdline(args))

        p = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            cwd=os.path.dirname(args[-1])
        )
        output = p.communicate()[0]
        if p.returncode != 0:
            raise ValueError(output)
        self.log.debug(output)

        # Adding movie representation. The advertised frame range includes
        # handles (sourceIn/sourceOut extended), normalized to start at 0.
        start_frame = int(
            instance.data["sourceIn"] - (
                instance.data["handleStart"]
            )
        )
        end_frame = int(
            instance.data["sourceOut"] + (
                instance.data["handleEnd"]
            )
        )
        representation = {
            "files": os.path.basename(output_path),
            # FIX: key was "staging_dir"; the representation schema used
            # everywhere else (including the thumbnail below) is "stagingDir".
            "stagingDir": staging_dir,
            "startFrame": 0,
            "endFrame": end_frame - start_frame,
            "step": 1,
            "frameRate": (
                instance.context.data["activeSequence"].framerate().toFloat()
            ),
            "preview": True,
            "thumbnail": False,
            "name": "preview",
            "ext": "mov",
        }
        instance.data["representations"] = [representation]
        self.log.debug("Adding representation: {}".format(representation))

        # Adding thumbnail representation, rendered next to the source file
        # only if it does not already exist.
        path = instance.data["sourcePath"].replace(".mov", ".png")
        if not os.path.exists(path):
            item.thumbnail(start_frame).save(path, format="png")
        representation = {
            "files": os.path.basename(path),
            "stagingDir": os.path.dirname(path),
            "name": "thumbnail",
            "thumbnail": True,
            "ext": "png"
        }
        instance.data["representations"].append(representation)
        self.log.debug("Adding representation: {}".format(representation))

View file

@ -78,8 +78,8 @@ class CollectClips(api.ContextPlugin):
"sourceFirst": source_first_frame,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
"timelineIn": int(item.timelineIn()),
"timelineOut": int(item.timelineOut())
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut())
}
)

View file

@ -18,14 +18,14 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin):
source_in_h = instance.data["sourceIn"] - handle_start
source_out_h = instance.data["sourceOut"] + handle_end
timeline_in = instance.data["timelineIn"]
timeline_out = instance.data["timelineOut"]
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
timeline_in_h = timeline_in - handle_start
timeline_out_h = timeline_out + handle_end
# set frame start with tag or take it from timeline
frame_start = instance.data.get("frameStart")
frame_start = instance.data.get("startingFrame")
if not frame_start:
frame_start = timeline_in
@ -36,12 +36,10 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin):
{
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
"startFrame": frame_start,
"endFrame": frame_end,
"timelineInH": timeline_in_h,
"timelineOutH": timeline_out_h,
"edit_in": timeline_in,
"edit_out": timeline_out
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h
}
)
self.log.debug("__ data: {}".format(data))

View file

@ -1,4 +1,3 @@
import json
from pyblish import api
@ -56,6 +55,8 @@ class CollectClipHandles(api.ContextPlugin):
"handleStart", 0
)
instance.data["handleEnd"] = s_asset_data.get("handleEnd", 0)
# debug printing
self.log.debug("_ s_asset_data: `{}`".format(
s_asset_data))
self.log.debug("_ instance.data[handles]: `{}`".format(

View file

@ -161,10 +161,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
})
# adding frame start if any on instance
start_frame = instance.data.get("frameStart")
start_frame = instance.data.get("startingFrame")
if start_frame:
asset_shared.update({
"frameStart": start_frame
"startingFrame": start_frame
})
@ -218,12 +218,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["tasks"] = s_asset_data["tasks"]
# adding frame start if any on instance
start_frame = s_asset_data.get("frameStart")
start_frame = s_asset_data.get("startingFrame")
if start_frame:
instance.data["startFrame"] = start_frame
instance.data["endFrame"] = start_frame + (
instance.data["timelineOut"] -
instance.data["timelineIn"])
instance.data["frameStart"] = start_frame
instance.data["frameEnd"] = start_frame + (
instance.data["clipOut"] -
instance.data["clipIn"])
@ -254,13 +254,11 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
if instance.data.get("main"):
in_info['custom_attributes'] = {
'handles': int(instance.data.get('handles', 0)),
'handle_start': handle_start,
'handle_end': handle_end,
'fstart': instance.data["startFrame"],
'fend': instance.data["endFrame"],
'fps': instance.context.data["fps"],
"edit_in": instance.data["timelineIn"],
"edit_out": instance.data["timelineOut"]
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
'fps': instance.context.data["fps"]
}
# adding SourceResolution if Tag was present
@ -272,9 +270,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
width, height, pixel_aspect))
in_info['custom_attributes'].update({
"resolution_width": width,
"resolution_height": height,
"pixel_aspect": pixel_aspect
"resolutionWidth": width,
"resolutionHeight": height,
"pixelAspect": pixel_aspect
})
in_info['tasks'] = instance.data['tasks']

View file

@ -54,7 +54,8 @@ class CollectPlates(api.InstancePlugin):
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[1]
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
if "review" in instance.data["families"]:
@ -120,7 +121,9 @@ class CollectPlatesData(api.InstancePlugin):
item = instance.data["item"]
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "startFrame", "endFrame", "sourceInH", "sourceOutH", "timelineIn", "timelineOut", "timelineInH", "timelineOutH", "asset", "track", "version"
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
"clipInH", "clipOutH", "asset", "track", "version"
]
# pass data to version
@ -170,10 +173,10 @@ class CollectPlatesData(api.InstancePlugin):
plates_mov_representation = {
'files': mov_file,
'stagingDir': staging_dir,
'startFrame': 0,
'endFrame': instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
"frameStart": 0,
"frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
'step': 1,
'frameRate': instance.context.data["fps"],
'fps': instance.context.data["fps"],
'preview': True,
'thumbnail': False,
'name': "preview",
@ -209,8 +212,8 @@ class CollectPlatesData(api.InstancePlugin):
'stagingDir': staging_dir,
'name': ext,
'ext': ext,
'startFrame': instance.data["startFrame"] - instance.data["handleStart"],
'endFrame': instance.data["endFrame"] + instance.data["handleEnd"],
"frameStart": instance.data["frameStart"] - instance.data["handleStart"],
"frameEnd": instance.data["frameEnd"] + instance.data["handleEnd"],
}
instance.data["representations"].append(plates_representation)

View file

@ -91,10 +91,10 @@ class CollectReviews(api.InstancePlugin):
representation = {
"files": file,
"stagingDir": file_dir,
"startFrame": rev_inst.data.get("sourceIn"),
"endFrame": rev_inst.data.get("sourceOut"),
"frameStart": rev_inst.data.get("sourceIn"),
"frameEnd": rev_inst.data.get("sourceOut"),
"step": 1,
"frameRate": rev_inst.data.get("fps"),
"fps": rev_inst.data.get("fps"),
"preview": True,
"thumbnail": False,
"name": "preview",
@ -140,7 +140,7 @@ class CollectReviews(api.InstancePlugin):
item = instance.data["item"]
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "startFrame", "endFrame", "sourceInH", "sourceOutH", "timelineIn", "timelineOut", "timelineInH", "timelineOutH", "asset", "track", "version"
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version"
]
version_data = dict()

View file

@ -18,7 +18,7 @@ class CollectClipTagFrameStart(api.InstancePlugin):
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels
if "frameStart" in t_family:
if "startingFrame" in t_family:
t_number = t_metadata.get("tag.number", "")
start_frame = int(t_number)
instance.data["frameStart"] = start_frame
instance.data["startingFrame"] = start_frame

View file

@ -6,6 +6,7 @@ from avalon import api as avalon
from pyblish import api as pyblish
from pypeapp import Logger
from .. import api
from pype.aport.lib import set_avalon_workdir
from ..widgets.message_window import message
@ -75,7 +76,7 @@ def extensions_sync():
def install():
api.set_avalon_workdir()
set_avalon_workdir()
log.info("Registering Premiera plug-ins..")
reg_paths = request_aport("/api/register_plugin_path",

View file

@ -138,8 +138,8 @@ def update_frame_range(comp, representations):
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
start = min(v["data"]["startFrame"] for v in versions)
end = max(v["data"]["endFrame"] for v in versions)
start = min(v["data"]["frameStart"] for v in versions)
end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)

View file

@ -301,11 +301,11 @@ class ComponentItem(QtWidgets.QFrame):
'preview': self.is_preview()
}
if ('startFrame' in self.in_data and 'endFrame' in self.in_data):
data['startFrame'] = self.in_data['startFrame']
data['endFrame'] = self.in_data['endFrame']
if ("frameStart" in self.in_data and "frameEnd" in self.in_data):
data["frameStart"] = self.in_data["frameStart"]
data["frameEnd"] = self.in_data["frameEnd"]
if 'frameRate' in self.in_data:
data['frameRate'] = self.in_data['frameRate']
if 'fps' in self.in_data:
data['fps'] = self.in_data['fps']
return data

View file

@ -185,8 +185,8 @@ class DropDataFrame(QtWidgets.QFrame):
'name': file_base,
'ext': file_ext,
'file_info': range,
'startFrame': startFrame,
'endFrame': endFrame,
"frameStart": startFrame,
"frameEnd": endFrame,
'representation': repr_name,
'folder_path': folder_path,
'is_sequence': True,
@ -253,24 +253,24 @@ class DropDataFrame(QtWidgets.QFrame):
ext in self.presets['extensions']['video_file']
):
probe_data = self.load_data_with_probe(filepath)
if 'frameRate' not in data:
if 'fps' not in data:
# default value
frameRate = 25
frameRate_string = probe_data.get('r_frame_rate')
if frameRate_string:
frameRate = int(frameRate_string.split('/')[0])
fps = 25
fps_string = probe_data.get('r_frame_rate')
if fps_string:
fps = int(fps_string.split('/')[0])
output['frameRate'] = frameRate
output['fps'] = fps
if 'startFrame' not in data or 'endFrame' not in data:
if "frameStart" not in data or "frameEnd" not in data:
startFrame = endFrame = 1
endFrame_string = probe_data.get('nb_frames')
if endFrame_string:
endFrame = int(endFrame_string)
output['startFrame'] = startFrame
output['endFrame'] = endFrame
output["frameStart"] = startFrame
output["frameEnd"] = endFrame
if (ext == '.mov') and (not file_info):
file_info = probe_data.get('codec_name')

Some files were not shown because too many files have changed in this diff Show more