Merge branch 'develop' into feature/PYPE-16-sync_avalon_event

Jakub Trllo 2018-12-04 18:48:20 +01:00
commit f586695ed8
107 changed files with 16218 additions and 349 deletions

View file

@ -6,6 +6,15 @@ from avalon import api as avalon
from .launcher_actions import register_launcher_actions
from .lib import collect_container_metadata
import logging
log = logging.getLogger(__name__)
# do not delete - these are mandatory
Anatomy = None
Dataflow = None
Metadata = None
Colorspace = None
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@ -15,12 +24,13 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "global", "load")
def install():
print("Registering global plug-ins..")
log.info("Registering global plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
def uninstall():
print("Deregistering global plug-ins..")
log.info("Deregistering global plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
log.info("Global plug-ins unregistred")

View file

@ -15,6 +15,26 @@ from .action import (
RepairContextAction
)
from app.api import Logger
from . import (
Anatomy,
Colorspace,
Metadata,
Dataflow
)
from .templates import (
load_data_from_templates,
reset_data_from_templates,
get_project_name,
get_project_code,
get_hiearchy,
get_asset,
get_task,
fill_avalon_workdir,
get_version_from_workfile
)
__all__ = [
# plugin classes
"Extractor",
@ -25,5 +45,28 @@ __all__ = [
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"RepairAction"
"RepairAction",
"Logger",
# contextual templates
# get data to preloaded templates
"load_data_from_templates",
"reset_data_from_templates",
# get contextual data
"get_project_name",
"get_project_code",
"get_hiearchy",
"get_asset",
"get_task",
"fill_avalon_workdir",
"get_version_from_workfile",
# preloaded templates
"Anatomy",
"Colorspace",
"Metadata",
"Dataflow"
]

View file

@ -14,7 +14,7 @@ def registerApp(app, session):
try:
variant = app['name'].split("_")[1]
except Exception as e:
log.warning("'{0}' - App 'name' and 'variant' is not separated by '_' (variant is set to '')".format(app['name']))
log.warning("'{0}' - App 'name' and 'variant' is not separated by '_' (variant is not set)".format(app['name']))
return
abspath = lib.which_app(app['name'])
@ -23,17 +23,16 @@ def registerApp(app, session):
return
apptoml = toml.load(abspath)
executable = apptoml['executable']
label = app['label']
if 'ftrack_label' in apptoml:
label = apptoml['ftrack_label']
icon = None
# TODO get right icons
if 'nuke' in app['name']:
icon = "https://mbtskoudsalg.com/images/nuke-icon-png-2.png"
label = "Nuke"
elif 'maya' in app['name']:
icon = "http://icons.iconarchive.com/icons/froyoshark/enkel/256/Maya-icon.png"
label = "Autodesk Maya"
if 'icon' in apptoml:
icon = apptoml['icon']
# register action
AppAction(session, label, name, executable, variant, icon).register()
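For reference, the keys read from the application .toml above are 'executable', 'ftrack_label' and 'icon'. A minimal sketch of the label/icon fallback, with a hypothetical dict standing in for toml.load(abspath) (values are illustrative, not from the repository):

# Sketch only: shape of the dict toml.load(abspath) could return for a Nuke launcher
apptoml = {
    "executable": "nuke",                     # matched against PATH by lib.which_app()
    "ftrack_label": "Nuke",                   # optional: overrides the launcher label
    "icon": "https://example.com/nuke.png",   # optional: action icon URL
}

label = apptoml.get("ftrack_label", "nuke_11.2")  # falls back to app['label'] ("nuke_11.2" is made up)
icon = apptoml.get("icon", None)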

View file

@ -24,14 +24,18 @@ class AvalonIdAttribute(BaseAction):
def discover(self, session, entities, event):
''' Validation '''
'''
Validation
- action is only for Administrators
'''
success = False
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
for role in user['user_security_roles']:
if role['security_role']['name'] == 'Administrator':
success = True
# userId = event['source']['user']['id']
# user = session.query('User where id is ' + userId).one()
# if user['user_security_roles'][0]['security_role']['name'] != 'Administrator':
# return False
return True
return success
def launch(self, session, entities, event):
@ -49,13 +53,21 @@ class AvalonIdAttribute(BaseAction):
})
session.commit()
try:
# Checkbox for event sync
cbxSyncName = 'avalon_auto_sync'
cbxSyncLabel = 'Avalon auto-sync'
cbxSyncExist = False
# Attribute Name and Label
custAttrName = 'avalon_mongo_id'
custAttrLabel = 'Avalon/Mongo Id'
attrs_update = set()
# Types that don't need object_type_id
base = {'show'}
# Don't create custom attribute on these entity types:
exceptions = ['task','milestone','library']
exceptions = ['task', 'milestone']
exceptions.extend(base)
# Get all possible object types
all_obj_types = session.query('ObjectType').all()
@ -73,6 +85,7 @@ class AvalonIdAttribute(BaseAction):
# Get IDs of filtered object types
all_obj_types_id = set()
for obj in all_obj_types:
all_obj_types_id.add(obj['id'])
@ -80,20 +93,60 @@ class AvalonIdAttribute(BaseAction):
current_cust_attr = session.query('CustomAttributeConfiguration').all()
# Filter already existing AvalonMongoID attr.
for attr in current_cust_attr:
if attr['key'] == cbxSyncName:
cbxSyncExist = True
cbxAttribute = attr
if attr['key'] == custAttrName:
if attr['entity_type'] in base:
base.remove(attr['entity_type'])
attrs_update.add(attr)
if attr['object_type_id'] in all_obj_types_id:
all_obj_types_id.remove(attr['object_type_id'])
attrs_update.add(attr)
# Roll the session back to the beginning ("session.query" raises an error on commit)
session.rollback()
# Set security roles for attribute
custAttrSecuRole = session.query('SecurityRole').all()
role_api = session.query('SecurityRole where name is "API"').one()
role_admin = session.query('SecurityRole where name is "Administrator"').one()
roles = [role_api, role_admin]
# Set Text type of Attribute
custom_attribute_type = session.query(
'CustomAttributeType where name is "text"'
).one()
# Get/Set 'avalon' group
groups = session.query('CustomAttributeGroup where name is "avalon"').all()
if len(groups) > 1:
msg = "There are more Custom attribute groups with name 'avalon'"
self.log.warning(msg)
return {'success': False, 'message': msg}
elif len(groups) < 1:
group = session.create('CustomAttributeGroup', {
'name': 'avalon',
})
session.commit()
else:
group = groups[0]
# Checkbox for auto-sync event / Create or Update(roles + group)
if cbxSyncExist is False:
cbxType = session.query('CustomAttributeType where name is "boolean"').first()
session.create('CustomAttributeConfiguration', {
'entity_type': 'show',
'type': cbxType,
'label': cbxSyncLabel,
'key': cbxSyncName,
'default': False,
'write_security_roles': roles,
'read_security_roles': roles,
'group':group,
})
else:
cbxAttribute['write_security_roles'] = roles
cbxAttribute['read_security_roles'] = roles
cbxAttribute['group'] = group
for entity_type in base:
# Create a custom attribute configuration.
@ -103,8 +156,9 @@ class AvalonIdAttribute(BaseAction):
'label': custAttrLabel,
'key': custAttrName,
'default': '',
'write_security_roles': custAttrSecuRole,
'read_security_roles': custAttrSecuRole,
'write_security_roles': roles,
'read_security_roles': roles,
'group':group,
'config': json.dumps({'markdown': False})
})
@ -117,16 +171,24 @@ class AvalonIdAttribute(BaseAction):
'label': custAttrLabel,
'key': custAttrName,
'default': '',
'write_security_roles': custAttrSecuRole,
'read_security_roles': custAttrSecuRole,
'write_security_roles': roles,
'read_security_roles': roles,
'group':group,
'config': json.dumps({'markdown': False})
})
for attr in attrs_update:
attr['write_security_roles'] = roles
attr['read_security_roles'] = roles
attr['group'] = group
job['status'] = 'done'
session.commit()
except Exception as e:
session.rollback()
job['status'] = 'failed'
session.commit()
self.log.error("Creating custom attributes failed ({})".format(e))
return True
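A hedged sketch of how the 'avalon_auto_sync' checkbox created above could be read later by a sync event handler (the helper below is hypothetical and not part of this commit; it only uses the attribute key defined above):

def project_wants_auto_sync(session, project_full_name):
    # Query the project and read the boolean custom attribute created by this action.
    project = session.query(
        'Project where full_name is "{0}"'.format(project_full_name)
    ).one()
    if 'avalon_auto_sync' not in project['custom_attributes']:
        return False
    return bool(project['custom_attributes']['avalon_auto_sync'])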

View file

@ -1,5 +1,3 @@
# :coding: utf-8
# :copyright: Copyright (c) 2017 ftrack
import sys
import argparse
import logging
@ -9,12 +7,45 @@ import json
import re
from pype import lib
from ftrack_action_handler import BaseAction
from bson.objectid import ObjectId
from avalon import io, inventory
from avalon.vendor import toml
from pype.ftrack import ftrack_utils
class SyncToAvalon(BaseAction):
'''Edit meta data action.'''
'''
Synchronization action - from Ftrack to the Avalon DB
Stores all information about an entity:
- Name (string) - the most important information = identifier of the entity
- Parent (ObjectId) - Avalon project id, if the entity is not the project itself
- Silo (string) - the last parent except the project
- Data (dictionary):
- VisualParent (ObjectId) - Avalon id of the parent asset
- Parents (array of string) - all parent names except the project
- Tasks (array of string) - tasks on the asset
- FtrackId (string)
- entityType (string) - the entity's type in Ftrack
* All custom attributes in the group 'Avalon' whose name doesn't start with 'avalon_'
* The same information is also stored for all parent and child entities.
The Avalon id of the asset is stored in Ftrack -> custom attribute 'avalon_mongo_id'.
- this action DOES NOT create the custom attribute if it doesn't exist
- run the 'Create Custom Attributes' action or create it manually (not recommended)
If the Ftrack entity already has the custom attribute 'avalon_mongo_id' storing an id:
- name, parents and silo are checked -> an error is shown if they are not exactly the same
- after the sync it is not allowed to rename or move entities
If the id in 'avalon_mongo_id' is an empty string or is not found in the DB:
- tries to find the entity by name
- found:
- raises an error if ftrackId/visual parent/parents are not the same
- not found:
- creates the asset/project
'''
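# Illustrative only: shape of an asset document this action writes to the Avalon
# DB, assembled from the fields listed in the docstring above (values are made up).
# {
#     "_id": ObjectId("..."),          # mirrored back to Ftrack as 'avalon_mongo_id'
#     "type": "asset",
#     "name": "sh010",
#     "parent": ObjectId("..."),       # Avalon project id
#     "silo": "Film",                  # last parent except the project
#     "data": {
#         "visualParent": ObjectId("..."),
#         "parents": ["seq01"],
#         "tasks": ["compositing"],
#         "ftrackId": "...",
#         "entityType": "Shot"
#     }
# }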
#: Action identifier.
identifier = 'sync.to.avalon'
@ -28,12 +59,20 @@ class SyncToAvalon(BaseAction):
def discover(self, session, entities, event):
''' Validation '''
roleCheck = False
discover = False
for entity in entities:
if entity.entity_type.lower() not in ['task', 'assetversion']:
discover = True
break
roleList = ['Administrator', 'Project Manager']
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
for role in user['user_security_roles']:
if role['security_role']['name'] in roleList:
roleCheck = True
if roleCheck is True:
for entity in entities:
if entity.entity_type.lower() not in ['task', 'assetversion']:
discover = True
break
return discover
@ -54,8 +93,8 @@ class SyncToAvalon(BaseAction):
})
try:
self.log.info("action <" + self.__class__.__name__ + "> is running")
self.log.info("Action <" + self.__class__.__name__ + "> is running")
self.ca_mongoid = 'avalon_mongo_id'
# TODO: AVALON_PROJECTS, AVALON_ASSET and AVALON_SILO should be set up, otherwise the console log shows avalon debug output
self.setAvalonAttributes()
self.importable = []
@ -71,34 +110,58 @@ class SyncToAvalon(BaseAction):
for entity in entities:
self.getShotAsset(entity)
# Check duplicate name - raise error if found
all_names = {}
# Check names: REGEX in schema/duplicates - raise error if found
all_names = []
duplicates = []
for e in self.importable:
name = self.checkName(e['name'])
if name in all_names:
duplicates.append("'{}'-'{}'".format(all_names[name], e['name']))
ftrack_utils.avalon_check_name(e)
if e['name'] in all_names:
duplicates.append("'{}'".format(e['name']))
else:
all_names[name] = e['name']
all_names.append(e['name'])
if len(duplicates) > 0:
raise ValueError("Unable to sync: Entity name duplication: {}".format(", ".join(duplicates)))
raise ValueError("Entity name duplication: {}".format(", ".join(duplicates)))
## ----- PROJECT ------
# store Ftrack project- self.importable[0] must be project entity!!!
self.entityProj = self.importable[0]
# set AVALON_ env
os.environ["AVALON_PROJECT"] = self.entityProj["full_name"]
os.environ["AVALON_ASSET"] = self.entityProj["full_name"]
self.avalon_project = None
io.install()
# Import all entities to Avalon DB
for e in self.importable:
self.importToAvalon(session, e)
io.uninstall()
job['status'] = 'done'
session.commit()
self.log.info('Synchronization to Avalon was successful!')
except ValueError as ve:
job['status'] = 'failed'
session.commit()
message = str(ve)
self.log.error('Error during syncToAvalon: {}'.format(message))
except Exception as e:
job['status'] = 'failed'
message = str(e)
self.log.error('Something went wrong during synchronization to Avalon! ({})'.format(message))
session.commit()
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log_message = "{}/{}/Line: {}".format(exc_type, fname, exc_tb.tb_lineno)
self.log.error('Error during syncToAvalon: {}'.format(log_message))
message = 'Unexpected Error - Please check Log for more information'
if len(message) > 0:
message = "Unable to sync: {}".format(message)
return {
'success': False,
'message': message
@ -126,162 +189,116 @@ class SyncToAvalon(BaseAction):
for child in childrens:
self.getShotAsset(child)
def checkName(self, input_name):
if input_name.find(" ") == -1:
name = input_name
else:
name = input_name.replace(" ", "-")
self.log.info("Name of {} was changed to {}".format(input_name, name))
return name
def importToAvalon(self, session, entity):
eLinks = []
ca_mongoid = 'avalon_mongo_id'
# get needed info of entity and all parents
for e in entity['link']:
tmp = session.get(e['type'], e['id'])
eLinks.append(tmp)
entityProj = eLinks[0]
# set AVALON_PROJECT env
os.environ["AVALON_PROJECT"] = entityProj["full_name"]
os.environ["AVALON_ASSET"] = entityProj['full_name']
# --- Begin: PUSH TO Avalon ---
io.install()
## ----- PROJECT ------
# If the project doesn't exist -> <Create project> ELSE <Update Config>
avalon_project = io.find_one({"type": "project", "name": entityProj["full_name"]})
entity_type = entity.entity_type
data = {}
data['ftrackId'] = entity['id']
data['entityType'] = entity_type
for cust_attr in self.custom_attributes:
key = cust_attr['key']
if cust_attr['entity_type'].lower() in ['asset']:
data[key] = entity['custom_attributes'][key]
elif cust_attr['entity_type'].lower() in ['show'] and entity_type.lower() == 'project':
data[key] = entity['custom_attributes'][key]
elif cust_attr['entity_type'].lower() in ['task'] and entity_type.lower() != 'project':
# Put space between capitals (e.g. 'AssetBuild' -> 'Asset Build')
entity_type_full = re.sub(r"(\w)([A-Z])", r"\1 \2", entity_type)
# Get object id of entity type
ent_obj_type_id = session.query('ObjectType where name is "{}"'.format(entity_type_full)).one()['id']
if cust_attr['object_type_id'] == ent_obj_type_id:
data[key] = entity['custom_attributes'][key]
if entity_type.lower() in ['project']:
# Set project Config
config = ftrack_utils.get_config(entity)
# Set project template
template = lib.get_avalon_project_template_schema()
if self.ca_mongoid in entity['custom_attributes']:
try:
projectId = ObjectId(self.entityProj['custom_attributes'][self.ca_mongoid])
self.avalon_project = io.find_one({"_id": projectId})
except:
self.log.debug("Entity {} don't have stored entity id in ftrack".format(entity['name']))
if avalon_project is None:
inventory.save(entityProj['full_name'], config, template)
else:
io.update_many({'type': 'project','name': entityProj['full_name']},
{'$set':{'config':config}})
if self.avalon_project is None:
self.avalon_project = io.find_one({
"type": "project",
"name": entity["full_name"]
})
if self.avalon_project is None:
inventory.save(entity['full_name'], config, template)
self.avalon_project = io.find_one({
"type": "project",
"name": entity["full_name"]
})
data['code'] = entity['name']
elif self.avalon_project['name'] != entity['full_name']:
raise ValueError('You can\'t change name {} to {}, avalon DB won\'t work properly!'.format(avalon_asset['name'], name))
data = ftrack_utils.get_data(self, entity, session,self.custom_attributes)
# Store info about project (FtrackId)
io.update_many({
'type': 'project',
'name': entity['full_name']},
{'$set':{'data':data}})
'name': entity['full_name']
}, {
'$set':{'data':data, 'config':config}
})
projectId = io.find_one({"type": "project", "name": entityProj["full_name"]})["_id"]
if ca_mongoid in entity['custom_attributes']:
entity['custom_attributes'][ca_mongoid] = str(projectId)
self.projectId = self.avalon_project["_id"]
if self.ca_mongoid in entity['custom_attributes']:
entity['custom_attributes'][self.ca_mongoid] = str(self.projectId)
else:
self.log.error("Custom attribute for <{}> is not created.".format(entity['name']))
io.uninstall()
return
# Store project Id
projectId = avalon_project["_id"]
## ----- ASSETS ------
# Presets:
# TODO how to check if entity is Asset Library or AssetBuild?
if entity_type in ['AssetBuild', 'Library']:
silo = 'Assets'
data = ftrack_utils.get_data(self, entity, session, self.custom_attributes)
# return if entity is silo
if len(data['parents']) == 0:
return
else:
silo = 'Film'
silo = data['parents'][0]
os.environ['AVALON_SILO'] = silo
# Get list of parents without project
parents = []
for i in range(1, len(eLinks)-1):
parents.append(eLinks[i])
# Get info for 'Data' in Avalon DB
tasks = []
for child in entity['children']:
if child.entity_type in ['Task']:
tasks.append(child['name'])
folderStruct = []
parentId = None
for parent in parents:
name = self.checkName(parent['name'])
folderStruct.append(name)
parentId = io.find_one({'type': 'asset', 'name': name})['_id']
if parent['parent'].entity_type != 'project' and parentId is None:
self.importToAvalon(parent)
parentId = io.find_one({'type': 'asset', 'name': name})['_id']
hierarchy = os.path.sep.join(folderStruct)
data['visualParent'] = parentId
data['parents'] = folderStruct
data['tasks'] = tasks
data['hierarchy'] = hierarchy
name = self.checkName(entity['name'])
name = entity['name']
os.environ['AVALON_ASSET'] = name
# Try to find asset in current database
avalon_asset = io.find_one({'type': 'asset', 'name': name})
# Create if it doesn't exist
if avalon_asset is None:
inventory.create_asset(name, silo, data, projectId)
self.log.debug("Asset {} - created".format(name))
# Raise error if it seems to be different ent. with same name
elif (avalon_asset['data']['ftrackId'] != data['ftrackId'] or
avalon_asset['data']['visualParent'] != data['visualParent'] or
avalon_asset['data']['parents'] != data['parents']):
raise ValueError('Entity <{}> is not same'.format(name))
# Else update info
else:
io.update_many({'type': 'asset','name': name},
{'$set':{'data':data, 'silo': silo}})
# TODO check if is asset in same folder!!! ???? FEATURE FOR FUTURE
self.log.debug("Asset {} - updated".format(name))
# Try to find asset in current database
avalon_asset = None
if self.ca_mongoid in entity['custom_attributes']:
try:
entityId = ObjectId(entity['custom_attributes'][self.ca_mongoid])
avalon_asset = io.find_one({"_id": entityId})
except:
self.log.debug("Entity {} don't have stored entity id in ftrack".format(entity['name']))
if avalon_asset is None:
avalon_asset = io.find_one({'type': 'asset', 'name': name})
# Create if it doesn't exist
if avalon_asset is None:
inventory.create_asset(name, silo, data, self.projectId)
self.log.debug("Asset {} - created".format(name))
# Raise error if it seems to be different ent. with same name
else:
aD = avalon_asset['data']
# check_attr = ['parents', 'ftrackId', 'visualParent']
if (avalon_asset['data']['parents'] != data['parents'] or
avalon_asset['silo'] != silo):
raise ValueError('In Avalon DB already exists entity with name "{0}"'.format(name))
elif avalon_asset['name'] != entity['name']:
raise ValueError('You can\'t change name {} to {}, avalon DB won\'t work properly - please create new asset'.format(avalon_asset['name'], name))
elif avalon_asset['silo'] != silo or avalon_asset['data']['parents'] != data['parents']:
old_path = "/".join(avalon_asset['data']['parents'])
new_path = "/".join(data['parents'])
raise ValueError('You can\'t move entities. Entity "{}" was moved from "{}" to "{}"'.format(avalon_asset['name'], old_path, new_path))
# Update info
io.update_many({'type': 'asset','name': name},
{'$set':{'data':data, 'silo': silo}})
self.log.debug("Asset {} - updated".format(name))
entityId = io.find_one({'type': 'asset', 'name': name})['_id']
## FTRACK FEATURE - FTRACK MUST HAVE avalon_mongo_id FOR EACH ENTITY TYPE EXCEPT TASK
# Set custom attribute to avalon/mongo id of entity (parentID is last)
if ca_mongoid in entity['custom_attributes']:
entity['custom_attributes'][ca_mongoid] = str(entityId)
if self.ca_mongoid in entity['custom_attributes']:
entity['custom_attributes'][self.ca_mongoid] = str(entityId)
else:
self.log.error("Custom attribute for <{}> is not created.".format(entity['name']))
io.uninstall()
session.commit()

View file

@ -1,6 +1,7 @@
# :coding: utf-8
# :copyright: Copyright (c) 2017 ftrack
import os
import sys
import logging
import getpass
import platform
@ -10,14 +11,12 @@ from avalon import io, lib, pipeline
from avalon import session as sess
import acre
from app.api import (
Templates,
Logger
)
t = Templates(
type=["anatomy"]
)
from pype import api as pype
log = pype.Logger.getLogger(__name__, "ftrack")
log.debug("pype.Anatomy: {}".format(pype.Anatomy))
class AppAction(object):
@ -34,7 +33,7 @@ class AppAction(object):
def __init__(self, session, label, name, executable, variant=None, icon=None, description=None):
'''Expects a ftrack_api.Session instance'''
self.log = Logger.getLogger(self.__class__.__name__)
self.log = pype.Logger.getLogger(self.__class__.__name__)
# self.logger = Logger.getLogger(__name__)
@ -74,6 +73,8 @@ class AppAction(object):
self._launch
)
self.log.info("Application '{}' - Registered successfully".format(self.label))
def _discover(self, event):
args = self._translate_event(
self.session, event
@ -241,7 +242,9 @@ class AppAction(object):
os.environ["AVALON_APP"] = self.identifier
os.environ["AVALON_APP_NAME"] = self.identifier + "_" + self.variant
anatomy = t.anatomy
os.environ["FTRACK_TASKID"] = id
anatomy = pype.Anatomy
io.install()
hierarchy = io.find_one({"type": 'asset', "name": entity['parent']['name']})[
'data']['parents']
@ -255,9 +258,10 @@ class AppAction(object):
"task": entity['name'],
"asset": entity['parent']['name'],
"hierarchy": hierarchy}
anatomy = anatomy.format(data)
try:
anatomy = anatomy.format(data)
except Exception as e:
log.error("{0} Error in anatomy.format: {1}".format(__name__, e))
os.environ["AVALON_WORKDIR"] = os.path.join(anatomy.work.root, anatomy.work.folder)
# TODO Add paths to avalon setup from tomls
@ -297,20 +301,71 @@ class AppAction(object):
# Full path to executable launcher
execfile = None
for ext in os.environ["PATHEXT"].split(os.pathsep):
fpath = os.path.join(path.strip('"'), self.executable + ext)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
execfile = fpath
break
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
fpath = os.path.join(path.strip('"'), self.executable + ext)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
execfile = fpath
break
pass
# Run the software if an executable was found
if execfile is not None:
lib.launch(executable=execfile, args=[], environment=env)
else:
return {
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
}
pass
if sys.platform.startswith('linux'):
execfile = os.path.join(path.strip('"'), self.executable)
if os.path.isfile(execfile):
try:
fp = open(execfile)
except PermissionError as p:
log.error('Access denied on {0} - {1}'.
format(execfile, p))
return {
'success': False,
'message': "Access denied on launcher - {}".
format(execfile)
}
fp.close()
# check executable permission
if not os.access(execfile, os.X_OK):
log.error('No executable permission on {}'.
format(execfile))
return {
'success': False,
'message': "No executable permission - {}"
.format(execfile)
}
pass
else:
log.error('Launcher doesn\'t exist - {}'.
format(execfile))
return {
'success': False,
'message': "Launcher doesn't exist - {}"
.format(execfile)
}
pass
# Run the software if an executable was found
if execfile is not None:
lib.launch('/usr/bin/env', args=['bash', execfile], environment=env)
else:
return {
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
}
pass
# Run the software if an executable was found
if execfile is not None:
lib.launch(executable=execfile, args=[], environment=env)
else:
return {
'success': False,
'message': "We didn't found launcher for {0}".format(self.label)
}
# RUN TIMER IN FTRACK
username = event['source']['user']['username']
@ -398,7 +453,7 @@ class BaseAction(object):
def __init__(self, session):
'''Expects a ftrack_api.Session instance'''
self.log = Logger.getLogger(self.__class__.__name__)
self.log = pype.Logger.getLogger(self.__class__.__name__)
if self.label is None:
raise ValueError(
@ -435,7 +490,8 @@ class BaseAction(object):
),
self._launch
)
self.log.info("----- action - <" + self.__class__.__name__ + "> - Has been registered -----")
self.log.info("Action '{}' - Registered successfully".format(self.__class__.__name__))
def _discover(self, event):
args = self._translate_event(

View file

@ -7,11 +7,19 @@ import time
from app import style
from app.vendor.Qt import QtCore, QtGui, QtWidgets
from pype.ftrack import credentials, login_dialog as login_dialog
from app.api import Logger
from FtrackServer import FtrackServer
log = Logger.getLogger(__name__)
from pype import api as pype
# load data from templates
pype.load_data_from_templates()
log = pype.Logger.getLogger(__name__, "ftrack")
# Validate if already logged into Ftrack
class FtrackRunner:
def __init__(self, main_parent=None, parent=None):
@ -76,7 +84,7 @@ class FtrackRunner:
def runActionServer(self):
if self.actionThread is None:
self.actionThread = threading.Thread(target=self.setActionServer)
self.actionThread.daemon=True
self.actionThread.daemon = True
self.actionThread.start()
log.info("Ftrack action server launched")
@ -107,7 +115,7 @@ class FtrackRunner:
def runEventServer(self):
if self.eventThread is None:
self.eventThread = threading.Thread(target=self.setEventServer)
self.eventThread.daemon=True
self.eventThread.daemon = True
self.eventThread.start()
log.info("Ftrack event server launched")
@ -168,9 +176,9 @@ class FtrackRunner:
self.smEventS.addAction(self.aStopEventS)
# Actions - basic
self.aLogin = QtWidgets.QAction("Login",self.menu)
self.aLogin = QtWidgets.QAction("Login", self.menu)
self.aLogin.triggered.connect(self.validate)
self.aLogout = QtWidgets.QAction("Logout",self.menu)
self.aLogout = QtWidgets.QAction("Logout", self.menu)
self.aLogout.triggered.connect(self.logout)
self.menu.addAction(self.aLogin)

View file

@ -1,10 +1,114 @@
# ftrack helper functions
import os
import sys
import re
from pprint import *
import ftrack_api
import os
import traceback
from pprint import *
from pype import lib
import avalon.io as io
import avalon.api
import avalon
from avalon.vendor import toml, jsonschema
from app.api import Logger
log = Logger.getLogger(__name__)
def get_data(parent, entity, session, custom_attributes):
entity_type = entity.entity_type
data = {}
data['ftrackId'] = entity['id']
data['entityType'] = entity_type
for cust_attr in custom_attributes:
key = cust_attr['key']
if cust_attr['entity_type'].lower() in ['asset']:
data[key] = entity['custom_attributes'][key]
elif cust_attr['entity_type'].lower() in ['show'] and entity_type.lower() == 'project':
data[key] = entity['custom_attributes'][key]
elif cust_attr['entity_type'].lower() in ['task'] and entity_type.lower() != 'project':
# Put space between capitals (e.g. 'AssetBuild' -> 'Asset Build')
entity_type_full = re.sub(r"(\w)([A-Z])", r"\1 \2", entity_type)
# Get object id of entity type
ent_obj_type_id = session.query('ObjectType where name is "{}"'.format(entity_type_full)).one()['id']
if cust_attr['object_type_id'] == ent_obj_type_id:
data[key] = entity['custom_attributes'][key]
if entity_type in ['Project']:
data['code'] = entity['name']
return data
# Get info for 'Data' in Avalon DB
tasks = []
for child in entity['children']:
if child.entity_type in ['Task']:
tasks.append(child['name'])
# Get list of parents without project
parents = []
folderStruct = []
for i in range(1, len(entity['link'])-1):
parEnt = session.get(entity['link'][i]['type'], entity['link'][i]['id'])
parName = parEnt['name']
folderStruct.append(parName)
if i > 1:
parents.append(parEnt)
parentId = None
for parent in parents:
parentId = io.find_one({'type': 'asset', 'name': parName})['_id']
if parent['parent'].entity_type != 'project' and parentId is None:
parent.importToAvalon(parent)
parentId = io.find_one({'type': 'asset', 'name': parName})['_id']
hierarchy = os.path.sep.join(folderStruct)
data['visualParent'] = parentId
data['parents'] = folderStruct
data['tasks'] = tasks
data['hierarchy'] = hierarchy
return data
def avalon_check_name(entity, inSchema = None):
ValidationError = jsonschema.ValidationError
alright = True
name = entity['name']
if " " in name:
alright = False
data = {}
data['data'] = {}
data['type'] = 'asset'
schema = "avalon-core:asset-2.0"
# TODO have project any REGEX check?
if entity.entity_type in ['Project']:
# data['type'] = 'project'
name = entity['full_name']
# schema = get_avalon_project_template_schema()['schema']
# elif entity.entity_type in ['AssetBuild','Library']:
# data['silo'] = 'Assets'
# else:
# data['silo'] = 'Film'
data['silo'] = 'Film'
if inSchema is not None:
schema = inSchema
data['schema'] = schema
data['name'] = name
try:
avalon.schema.validate(data)
except ValidationError:
alright = False
if alright is False:
raise ValueError("{} includes unsupported symbols like 'dash' or 'space'".format(name))
def get_apps(entity):
""" Get apps from project
@ -18,10 +122,14 @@ def get_apps(entity):
apps = []
for app in entity['custom_attributes']['applications']:
try:
label = toml.load(lib.which_app(app))['label']
apps.append({'name':app, 'label':label})
app_config = {}
app_config['name'] = app
app_config['label'] = toml.load(avalon.lib.which_app(app))['label']
apps.append(app_config)
except Exception as e:
print('Error with application {0} - {1}'.format(app, e))
log.warning('Error with application {0} - {1}'.format(app, e))
return apps
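# Example return value (application names and labels are hypothetical):
#   [{'name': 'nuke_11.2', 'label': 'Nuke 11.2'},
#    {'name': 'maya_2018', 'label': 'Autodesk Maya 2018'}]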
def get_config(entity):

View file

@ -9,6 +9,7 @@ from .vendor.pather.error import ParseError
import avalon.io as io
import avalon.api
import avalon
log = logging.getLogger(__name__)
@ -354,10 +355,12 @@ def get_avalon_project_template():
"""
template = Templates(type=["anatomy"])
proj_template = {}
# proj_template['workfile'] = '{asset[name]}_{task[name]}_{version:0>3}<_{comment}>'
# proj_template['work'] = '{root}/{project}/{hierarchy}/{asset}/work/{task}'
# proj_template['publish'] = '{root}/{project}/{hierarchy}/{asset}/publish/{family}/{subset}/v{version}/{projectcode}_{asset}_{subset}_v{version}.{representation}'
proj_template['workfile'] = template.anatomy.avalon.workfile
proj_template['work'] = template.anatomy.avalon.work
proj_template['publish'] = template.anatomy.avalon.publish
proj_template['workfile'] = '{asset[name]}_{task[name]}_{version:0>3}<_{comment}>'
proj_template['work'] = '{root}/{project}/{hierarchy}/{asset}/work/{task}'
proj_template['publish'] = '{root}/{project}/{hierarchy}/{asset}/publish/{family}/{subset}/v{version}/{projectcode}_{asset}_{subset}_v{version}.{representation}'
# TODO this down should work but it can't be in default.toml:
# - Raises error when App (e.g. Nuke) is started
# proj_template['workfile'] = template.anatomy.avalon.workfile
# proj_template['work'] = template.anatomy.avalon.work
# proj_template['publish'] = template.anatomy.avalon.publish
return proj_template
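As a quick illustration, the 'work' template above resolves like this (the sample values are hypothetical):

work = '{root}/{project}/{hierarchy}/{asset}/work/{task}'
print(work.format(
    root="P:/projects",
    project="myproject",
    hierarchy="assets/characters",
    asset="hero",
    task="modeling"
))
# -> P:/projects/myproject/assets/characters/hero/work/modeling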

View file

@ -1,8 +1,29 @@
import os
import sys
from avalon import api as avalon
from pyblish import api as pyblish
from .. import api as pype
from pype.nuke import menu
from .lib import (
create_write_node
)
import nuke
# removing logger handler created in avalon_core
for name, handler in [(handler.get_name(), handler)
for handler in pype.Logger.logging.root.handlers[:]]:
if "pype" not in str(name).lower():
pype.Logger.logging.root.removeHandler(handler)
log = pype.Logger.getLogger(__name__, "nuke")
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@ -12,9 +33,77 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "nuke", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "nuke", "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "nuke", "inventory")
self = sys.modules[__name__]
self.nLogger = None
class NukeHandler(pype.Logger.logging.Handler):
'''
Nuke Handler - emits logs into Nuke's script editor.
warning will emit nuke.warning()
critical and fatal will pop up a message dialog to alert the user of the error.
'''
def __init__(self):
pype.Logger.logging.Handler.__init__(self)
self.set_name("Pype_Nuke_Handler")
def emit(self, record):
# Formatted message:
msg = self.format(record)
if record.levelname.lower() in [
"warning",
"critical",
"fatal",
"error"
]:
nuke.message(msg)
'''Adding Nuke Logging Handler'''
nuke_handler = NukeHandler()
if nuke_handler.get_name() \
not in [handler.get_name()
for handler in pype.Logger.logging.root.handlers[:]]:
pype.Logger.logging.getLogger().addHandler(nuke_handler)
if not self.nLogger:
self.nLogger = pype.Logger
def reload_config():
"""Attempt to reload pipeline at run-time.
CAUTION: This is primarily for development and debugging purposes.
"""
import importlib
for module in (
"app",
"app.api",
"{}.api".format(AVALON_CONFIG),
"{}.templates".format(AVALON_CONFIG),
"{}.nuke".format(AVALON_CONFIG),
"{}.nuke.lib".format(AVALON_CONFIG),
"{}.nuke.templates".format(AVALON_CONFIG),
"{}.nuke.menu".format(AVALON_CONFIG)
):
log.info("Reloading module: {}...".format(module))
module = importlib.import_module(module)
try:
reload(module)
except Exception:
importlib.reload(module)
def install():
print("Registering Nuke plug-ins..")
pype.fill_avalon_workdir()
reload_config()
log.info("Registering Nuke plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
@ -23,48 +112,56 @@ def install():
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
# Disable all families except for the ones we explicitly want to see
family_states = ["imagesequence",
"camera",
"pointcache"]
family_states = [
"render",
"still"
"lifeGroup",
"backdrop",
"imagesequence",
"mov"
"camera",
"pointcache",
]
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
# # work files start at app start
# workfiles.show(
# os.environ["AVALON_WORKDIR"]
# )
menu.install()
# load data from templates
pype.load_data_from_templates()
def uninstall():
print("Deregistering Nuke plug-ins..")
log.info("Deregistering Nuke plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
# reset data from templates
pype.reset_data_from_templates()
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
from avalon.nuke import viewer_update_and_undo_stop, add_publish_knob, log
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
self.log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
writes = [n for n in instance if
n.Class() == "Write"]
if not writes:
return
from avalon.nuke import (
viewer_update_and_undo_stop,
add_publish_knob
)
# Whether instances should be passthrough based on new value
passthrough = not new_value
with viewer_update_and_undo_stop():
for n in writes:
try:
n["publish"].value()
except ValueError:
n = add_publish_knob(n)
log.info(" `Publish` knob was added to write node..")
current = n["publish"].value()
if current != passthrough:
n["publish"].setValue(passthrough)
with viewer_update_and_undo_stop():
n = instance[0]
try:
n["publish"].value()
except ValueError:
n = add_publish_knob(n)
log.info(" `Publish` knob was added to write node..")
n["publish"].setValue(new_value)

View file

@ -1,14 +1,254 @@
import sys
from collections import OrderedDict
from pprint import pprint
from avalon.vendor.Qt import QtGui
import avalon.nuke
import pype.api as pype
import nuke
log = pype.Logger.getLogger(__name__, "nuke")
self = sys.modules[__name__]
self._project = None
def format_anatomy(data):
from .templates import (
get_anatomy
)
file = script_name()
anatomy = get_anatomy()
# TODO: perhaps should be in try!
padding = anatomy.render.padding
data.update({
"hierarchy": pype.get_hiearchy(),
"frame": "#"*padding,
"VERSION": pype.get_version_from_workfile(file)
})
# log.info("format_anatomy:anatomy: {}".format(anatomy))
return anatomy.format(data)
def script_name():
return nuke.root().knob('name').value()
def create_write_node(name, data):
from .templates import (
get_dataflow,
get_colorspace
)
nuke_dataflow_writes = get_dataflow(**data)
nuke_colorspace_writes = get_colorspace(**data)
try:
anatomy_filled = format_anatomy({
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": pype.get_task(),
"family": data["avalon"]["family"],
"project": {"name": pype.get_project_name(),
"code": pype.get_project_code()},
"representation": nuke_dataflow_writes.file_type,
})
except Exception as e:
log.error("problem with resolving anatomy tepmlate: {}".format(e))
log.debug("anatomy_filled.render: {}".format(anatomy_filled.render))
_data = OrderedDict({
"file": str(anatomy_filled.render.path).replace("\\", "/")
})
# adding dataflow template
{_data.update({k: v})
for k, v in nuke_dataflow_writes.items()
if k not in ["id", "previous"]}
# adding colorspace template
{_data.update({k: v})
for k, v in nuke_colorspace_writes.items()}
_data = avalon.nuke.lib.fix_data_for_node_create(_data)
log.debug(_data)
_data["frame_range"] = data.get("frame_range", None)
instance = avalon.nuke.lib.add_write_node(
name,
**_data
)
instance = avalon.nuke.lib.imprint(instance, data["avalon"])
add_rendering_knobs(instance)
return instance
def add_rendering_knobs(node):
if "render" not in node.knobs():
knob = nuke.Boolean_Knob("render", "Render")
knob.setFlag(0x1000)
knob.setValue(False)
node.addKnob(knob)
if "render_farm" not in node.knobs():
knob = nuke.Boolean_Knob("render_farm", "Render on Farm")
knob.setValue(False)
node.addKnob(knob)
return node
def update_frame_range(start, end, root=None):
"""Set Nuke script start and end frame range
Args:
start (float, int): start frame
end (float, int): end frame
root (object, Optional): root object from nuke's script
Returns:
None
"""
knobs = {
"first_frame": start,
"last_frame": end
}
with avalon.nuke.viewer_update_and_undo_stop():
for key, value in knobs.items():
if root:
root[key].setValue(value)
else:
nuke.root()[key].setValue(value)
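# Usage sketch (frame values are illustrative):
#   update_frame_range(1001, 1150)                    # set the script range globally
#   update_frame_range(1001, 1150, root=nuke.root())  # or pass a root node explicitly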
def get_additional_data(container):
"""Get Nuke's related data for the container
Args:
container(dict): the container found by the ls() function
Returns:
dict
"""
node = container["_tool"]
tile_color = node['tile_color'].value()
if tile_color is None:
return {}
hex = '%08x' % tile_color
rgba = [
float(int(hex[0:2], 16)) / 255.0,
float(int(hex[2:4], 16)) / 255.0,
float(int(hex[4:6], 16)) / 255.0
]
return {"color": QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
def set_viewers_colorspace(viewer):
assert isinstance(viewer, dict), log.error(
"set_viewers_colorspace(): argument should be dictionary")
filter_knobs = [
"viewerProcess",
"wipe_position"
]
viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer']
erased_viewers = []
for v in viewers:
v['viewerProcess'].setValue(str(viewer.viewerProcess))
if str(viewer.viewerProcess) not in v['viewerProcess'].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
pprint(copy_knobs)
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
nuke.delete(v)
# create new viewer
nv = nuke.createNode("Viewer")
# connect to original inputs
for i, n in enumerate(copy_inputs):
nv.setInput(i, n)
# set copied knobs
for k, v in copy_knobs.items():
print(k, v)
nv[k].setValue(v)
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer.viewerProcess))
if erased_viewers:
log.warning(
"Attention! Viewer nodes {} were erased."
"It had wrong color profile".format(erased_viewers))
def set_root_colorspace(root_dict):
assert isinstance(root_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
for knob, value in root_dict.items():
if nuke.root()[knob].value() not in value:
nuke.root()[knob].setValue(str(value))
log.info("nuke.root()['{}'] changed to: {}".format(knob, value))
def set_writes_colorspace(write_dict):
assert isinstance(write_dict, dict), log.error(
"set_root_colorspace(): argument should be dictionary")
log.info("set_writes_colorspace(): {}".format(write_dict))
def set_colorspace():
from pype import api as pype
nuke_colorspace = getattr(pype.Colorspace, "nuke", None)
try:
set_root_colorspace(nuke_colorspace.root)
except AttributeError:
log.error(
"set_colorspace(): missing `root` settings in template")
try:
set_viewers_colorspace(nuke_colorspace.viewer)
except AttributeError:
log.error(
"set_colorspace(): missing `viewer` settings in template")
try:
set_writes_colorspace(nuke_colorspace.write)
except AttributeError:
log.error(
"set_colorspace(): missing `write` settings in template")
try:
for key in nuke_colorspace:
log.info("{}".format(key))
except TypeError:
log.error("Nuke is not in templates! \n\n\n"
"contact your supervisor!")
def get_avalon_knob_data(node):
import toml
try:
data = toml.loads(node['avalon'].value())
except:
return None
return data
# TODO: the functions below are WIP and need to be checked where they are used
# ------------------------------------
def update_frame_range(start, end, root=None):
"""Set Nuke script start and end frame range

12
pype/nuke/menu.py Normal file
View file

@ -0,0 +1,12 @@
import nuke
from avalon.api import Session
from pype.nuke import lib
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
menu.addSeparator()
menu.addCommand("Set colorspace...", lib.set_colorspace)

41
pype/nuke/templates.py Normal file
View file

@ -0,0 +1,41 @@
from pype import api as pype
log = pype.Logger.getLogger(__name__, "nuke")
def get_anatomy(**kwarg):
return pype.Anatomy
def get_dataflow(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("nuke.templates.get_dataflow():"
"Missing mandatory kwargs `host`, `cls`")
nuke_dataflow = getattr(pype.Dataflow, str(host), None)
nuke_dataflow_node = getattr(nuke_dataflow.nodes, str(cls), None)
if preset:
nuke_dataflow_node = getattr(nuke_dataflow_node, str(preset), None)
log.info("Dataflow: {}".format(nuke_dataflow_node))
return nuke_dataflow_node
def get_colorspace(**kwarg):
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
preset = kwarg.get("preset", None)
assert any([host, cls]), log.error("nuke.templates.get_colorspace():"
"Missing mandatory kwargs `host`, `cls`")
nuke_colorspace = getattr(pype.Colorspace, str(host), None)
nuke_colorspace_node = getattr(nuke_colorspace, str(cls), None)
if preset:
nuke_colorspace_node = getattr(nuke_colorspace_node, str(preset), None)
log.info("Colorspace: {}".format(nuke_colorspace_node))
return nuke_colorspace_node
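A usage sketch for the two getters above; the "write" class name is hypothetical and only illustrates the calling convention (since 'class' is a reserved word, it has to be passed via dict unpacking):

from pype.nuke.templates import get_dataflow, get_colorspace

write_dataflow = get_dataflow(host="nuke", **{"class": "write"})
write_colorspace = get_colorspace(host="nuke", **{"class": "write"})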

View file

@ -0,0 +1,22 @@
import os
import ftrack_api_old as ftrack_api
import pyblish.api
class CollectFtrackApi(pyblish.api.ContextPlugin):
""" Collects an ftrack session and the current task id. """
order = pyblish.api.CollectorOrder
label = "Collect Ftrack Api"
def process(self, context):
# Collect session
session = ftrack_api.Session()
context.data["ftrackSession"] = session
# Collect task
task_id = os.environ.get("FTRACK_TASKID", "")
context.data["ftrackTask"] = session.get("Task", task_id)

View file

@ -0,0 +1,288 @@
import os
import pyblish.api
import clique
class IntegrateFtrackApi(pyblish.api.InstancePlugin):
""" Commit components to server. """
order = pyblish.api.IntegratorOrder+0.499
label = "Integrate Ftrack Api"
families = ["ftrack"]
def query(self, entitytype, data):
""" Generate a query expression from data supplied.
If a value is not a string, we'll add the id of the entity to the
query.
Args:
entitytype (str): The type of entity to query.
data (dict): The data to identify the entity.
exclusions (list): All keys to exclude from the query.
Returns:
str: String query to use with "session.query"
"""
queries = []
for key, value in data.iteritems():
if not isinstance(value, (basestring, int)):
if "id" in value.keys():
queries.append(
"{0}.id is \"{1}\"".format(key, value["id"])
)
else:
queries.append("{0} is \"{1}\"".format(key, value))
query = (
"select id from " + entitytype + " where " + " and ".join(queries)
)
self.log.debug(query)
return query
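# Illustrative example of the builder above (the id value is made up):
#   self.query("Asset", {"name": "main", "parent": {"id": "1234"}})
# could produce (key order follows dict iteration):
#   'select id from Asset where name is "main" and parent.id is "1234"'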
def process(self, instance):
session = instance.context.data["ftrackSession"]
task = instance.context.data["ftrackTask"]
info_msg = "Created new {entity_type} with data: {data}"
info_msg += ", metadata: {metadata}."
# Iterate over components and publish
for data in instance.data.get("ftrackComponentsList", []):
# AssetType
# Get existing entity.
assettype_data = {"short": "upload"}
assettype_data.update(data.get("assettype_data", {}))
assettype_entity = session.query(
self.query("AssetType", assettype_data)
).first()
# Create a new entity if none exists.
if not assettype_entity:
assettype_entity = session.create("AssetType", assettype_data)
self.log.info(
"Created new AssetType with data: ".format(assettype_data)
)
# Asset
# Get existing entity.
asset_data = {
"name": task["name"],
"type": assettype_entity,
"parent": task["parent"],
}
asset_data.update(data.get("asset_data", {}))
asset_entity = session.query(
self.query("Asset", asset_data)
).first()
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
asset_metadata = asset_data.pop("metadata", {})
# Create a new entity if none exists.
if not asset_entity:
asset_entity = session.create("Asset", asset_data)
self.log.info(
info_msg.format(
entity_type="Asset",
data=asset_data,
metadata=asset_metadata
)
)
# Adding metadata
existing_asset_metadata = asset_entity["metadata"]
existing_asset_metadata.update(asset_metadata)
asset_entity["metadata"] = existing_asset_metadata
# AssetVersion
# Get existing entity.
assetversion_data = {
"version": 0,
"asset": asset_entity,
"task": task
}
assetversion_data.update(data.get("assetversion_data", {}))
assetversion_entity = session.query(
self.query("AssetVersion", assetversion_data)
).first()
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
assetversion_metadata = assetversion_data.pop("metadata", {})
# Create a new entity if none exists.
if not assetversion_entity:
assetversion_entity = session.create(
"AssetVersion", assetversion_data
)
self.log.info(
info_msg.format(
entity_type="AssetVersion",
data=assetversion_data,
metadata=assetversion_metadata
)
)
# Adding metadata
existing_assetversion_metadata = assetversion_entity["metadata"]
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
# Have to commit the version and asset, because location can't
# determine the final location without.
session.commit()
# Component
# Get existing entity.
component_data = {
"name": "main",
"version": assetversion_entity
}
component_data.update(data.get("component_data", {}))
component_entity = session.query(
self.query("Component", component_data)
).first()
component_overwrite = data.get("component_overwrite", False)
location = data.get("component_location", session.pick_location())
# Overwrite existing component data if requested.
if component_entity and component_overwrite:
origin_location = session.query(
"Location where name is \"ftrack.origin\""
).one()
# Removing existing members from location
components = list(component_entity.get("members", []))
components += [component_entity]
for component in components:
for loc in component["component_locations"]:
if location["id"] == loc["location_id"]:
location.remove_component(
component, recursive=False
)
# Deleting existing members on component entity
for member in component_entity.get("members", []):
session.delete(member)
del(member)
session.commit()
# Reset members in memory
if "members" in component_entity.keys():
component_entity["members"] = []
# Add components to origin location
try:
collection = clique.parse(data["component_path"])
except ValueError:
# Assume its a single file
# Changing file type
name, ext = os.path.splitext(data["component_path"])
component_entity["file_type"] = ext
origin_location.add_component(
component_entity, data["component_path"]
)
else:
# Changing file type
component_entity["file_type"] = collection.format("{tail}")
# Create member components for sequence.
for member_path in collection:
size = 0
try:
size = os.path.getsize(member_path)
except OSError:
pass
name = collection.match(member_path).group("index")
member_data = {
"name": name,
"container": component_entity,
"size": size,
"file_type": os.path.splitext(member_path)[-1]
}
component = session.create(
"FileComponent", member_data
)
origin_location.add_component(
component, member_path, recursive=False
)
component_entity["members"].append(component)
# Add components to location.
location.add_component(
component_entity, origin_location, recursive=True
)
data["component"] = component_entity
msg = "Overwriting Component with path: {0}, data: {1}, "
msg += "location: {2}"
self.log.info(
msg.format(
data["component_path"],
component_data,
location
)
)
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
component_metadata = component_data.pop("metadata", {})
# Create new component if none exists.
new_component = False
if not component_entity:
component_entity = assetversion_entity.create_component(
data["component_path"],
data=component_data,
location=location
)
data["component"] = component_entity
msg = "Created new Component with path: {0}, data: {1}"
msg += ", metadata: {2}, location: {3}"
self.log.info(
msg.format(
data["component_path"],
component_data,
component_metadata,
location
)
)
new_component = True
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
# Setting assetversion thumbnail
if data.get("thumbnail", False):
assetversion_entity["thumbnail_id"] = component_entity["id"]
# Inform user about no changes to the database.
if (component_entity and not component_overwrite and
not new_component):
data["component"] = component_entity
self.log.info(
"Found existing component, and no request to overwrite. "
"Nothing has been changed."
)
else:
# Commit changes.
session.commit()

View file

@ -0,0 +1,67 @@
import pyblish.api
import os
class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"""Collect ftrack component data
Add ftrack component list to instance.
"""
order = pyblish.api.IntegratorOrder + 0.48
label = 'Integrate Ftrack Component'
family_mapping = {'camera': 'cam',
'look': 'look',
'mayaAscii': 'scene',
'model': 'geo',
'rig': 'rig',
'setdress': 'setdress',
'pointcache': 'cache',
'review': 'mov'}
def process(self, instance):
self.log.debug('instance {}'.format(instance))
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
version_number = int(assumed_version)
family = instance.data['family'].lower()
asset_type = ''
asset_type = self.family_mapping[family]
componentList = []
transfers = instance.data["transfers"]
ft_session = instance.context.data["ftrackSession"]
location = ft_session.query(
'Location where name is "ftrack.unmanaged"').one()
self.log.debug('location {}'.format(location))
for src, dest in transfers:
filename, ext = os.path.splitext(src)
self.log.debug('source filename: ' + filename)
self.log.debug('source ext: ' + ext)
componentList.append({"assettype_data": {
"short": asset_type,
},
"assetversion_data": {
"version": version_number,
},
"component_data": {
"name": ext[1:], # Default component name is "main".
},
"component_path": dest,
'component_location': location,
"component_overwrite": False,
}
)
self.log.debug('componentsList: {}'.format(str(componentList)))
instance.data["ftrackComponentsList"] = componentList

View file

@ -1,7 +1,7 @@
import pyblish.api
class CollectColorbleedComment(pyblish.api.ContextPlugin):
class CollectComment(pyblish.api.ContextPlugin):
"""This plug-ins displays the comment dialog box per default"""
label = "Collect Comment"

View file

@ -18,6 +18,3 @@ class CollectTemplates(pyblish.api.ContextPlugin):
type=["anatomy"]
)
context.data['anatomy'] = templates.anatomy
for key in templates.anatomy:
self.log.info(str(key) + ": " + str(templates.anatomy[key]))
# return

View file

@ -2,7 +2,7 @@ import pyblish.api
from avalon import api
class CollectMindbenderTime(pyblish.api.ContextPlugin):
class CollectTime(pyblish.api.ContextPlugin):
"""Store global time at the time of publish"""
label = "Collect Current Time"

View file

@ -233,6 +233,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"root": root,
"project": PROJECT,
"projectcode": "prjX",
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],

View file

@ -125,7 +125,7 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
hosts = ["fusion", "maya", "nuke"]
families = [
"saver.deadline",
"render.deadline",
"renderlayer",
"imagesequence"
]

View file

@ -107,6 +107,7 @@ def seq_to_glob(path):
"<f>": "<f>"
}
lower = path.lower()
has_pattern = False
for pattern, regex_pattern in patterns.items():
@ -213,6 +214,9 @@ class CollectLook(pyblish.api.InstancePlugin):
with lib.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
# make ftrack publishable
instance.data["families"] = ['ftrack']
def collect(self, instance):
self.log.info("Looking for look associations "

View file

@ -7,7 +7,7 @@ class CollectModelData(pyblish.api.InstancePlugin):
"""Collect model data
Ensures always only a single frame is extracted (current frame).
Note:
This is a workaround so that the `studio.model` family can use the
same pointcache extractor implementation as animation and pointcaches.
@ -24,3 +24,6 @@ class CollectModelData(pyblish.api.InstancePlugin):
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
# make ftrack publishable
instance.data["families"] = ['ftrack']

View file

@ -0,0 +1,47 @@
import pyblish.api
class CollectNukeRenderMode(pyblish.api.InstancePlugin):
# TODO: rewrite docstring to nuke
"""Collect current comp's render Mode
Options:
local
deadline
Note that this value is set for each comp separately. When you save the
comp this information will be stored in that file. If for some reason the
available tool does not visualize which render mode is set for the
current comp, please run the following line in the console (Py2)
comp.GetData("rendermode")
This will return the name of the current render mode as seen above under
Options.
"""
order = pyblish.api.CollectorOrder + 0.4
label = "Collect Render Mode"
hosts = ["nuke"]
families = ["write", "render.local"]
def process(self, instance):
"""Collect all image sequence tools"""
options = ["local", "deadline"]
node = instance[0]
if bool(node["render_local"].getValue()):
rendermode = "local"
else:
rendermode = "deadline"
assert rendermode in options, "Must be supported render mode"
# Append family
instance.data["families"].remove("render")
family = "render.{0}".format(rendermode)
instance.data["families"].append(family)
self.log.info("Render mode: {0}".format(rendermode))

View file

@ -0,0 +1,116 @@
import os
import nuke
import pyblish.api
class Extract(pyblish.api.InstancePlugin):
"""Super class for write and writegeo extractors."""
order = pyblish.api.ExtractorOrder
optional = True
label = "Extract Nuke [super]"
hosts = ["nuke"]
match = pyblish.api.Subset
# targets = ["process.local"]
def execute(self, instance):
# Get frame range
node = instance[0]
first_frame = nuke.root()["first_frame"].value()
last_frame = nuke.root()["last_frame"].value()
if node["use_limit"].value():
first_frame = node["first"].value()
last_frame = node["last"].value()
# Render frames
nuke.execute(node.name(), int(first_frame), int(last_frame))
class ExtractNukeWrite(Extract):
""" Extract output from write nodes. """
families = ["write", "local"]
label = "Extract Write"
def process(self, instance):
self.execute(instance)
# Validate output
for filename in list(instance.data["collection"]):
if not os.path.exists(filename):
instance.data["collection"].remove(filename)
self.log.warning("\"{0}\" didn't render.".format(filename))
class ExtractNukeCache(Extract):
label = "Cache"
families = ["cache", "local"]
def process(self, instance):
self.execute(instance)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeCamera(Extract):
label = "Camera"
families = ["camera", "local"]
def process(self, instance):
node = instance[0]
node["writeGeometries"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeGeometries"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeGeometry(Extract):
label = "Geometry"
families = ["geometry", "local"]
def process(self, instance):
node = instance[0]
node["writeCameras"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeCameras"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg

View file

@ -0,0 +1,98 @@
import re
import os
import json
import subprocess
import pyblish.api
from pype.action import get_errored_plugins_from_data
def _get_script():
"""Get path to the image sequence script"""
# todo: use a more elegant way to get the python script
try:
from pype.fusion.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_filesequence' "
"to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
class PublishImageSequence(pyblish.api.InstancePlugin):
"""Publish the generated local image sequences."""
order = pyblish.api.IntegratorOrder
label = "Publish Rendered Image Sequence(s)"
hosts = ["fusion"]
families = ["saver.renderlocal"]
def process(self, instance):
# Skip this plug-in if the ExtractImageSequence failed
errored_plugins = get_errored_plugins_from_data(instance.context)
if any(plugin.__name__ == "FusionRenderLocal" for plugin in
errored_plugins):
raise RuntimeError("Fusion local render failed, "
"publishing images skipped.")
subset = instance.data["subset"]
ext = instance.data["ext"]
# Regex to match resulting renders
regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset),
ext=re.escape(ext))
# The instance has most of the information already stored
metadata = {
"regex": regex,
"startFrame": instance.context.data["startFrame"],
"endFrame": instance.context.data["endFrame"],
"families": ["imagesequence"],
}
# Write metadata and store the path in the instance
output_directory = instance.data["outputDir"]
path = os.path.join(output_directory,
"{}_metadata.json".format(subset))
with open(path, "w") as f:
json.dump(metadata, f)
assert os.path.isfile(path), ("Stored path is not a file for %s"
% instance.data["name"])
# Suppress any subprocess console
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
process = subprocess.Popen(["python", _get_script(),
"--paths", path],
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
while True:
output = process.stdout.readline()
# Break when there is no output or a return code has been given
if output == '' and process.poll() is not None:
process.stdout.close()
break
if output:
line = output.strip()
if line.startswith("ERROR"):
self.log.error(line)
else:
self.log.info(line)
if process.returncode != 0:
raise RuntimeError("Process quit with non-zero "
"return code: {}".format(process.returncode))

View file

@ -0,0 +1,147 @@
import os
import json
import getpass
from avalon import api
from avalon.vendor import requests
import pyblish.api
class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit write instances of the current Nuke script to Deadline
Renders are submitted to a Deadline Web Service as
supplied via the environment variable AVALON_DEADLINE.
"""
label = "Submit to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["nuke"]
families = ["write", "render.deadline"]
def process(self, instance):
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
"http://localhost:8082")
assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
# Collect all saver instances in context that are to be rendered
write_instances = []
for instance in context[:]:
if not self.families[0] in instance.data.get("families"):
# Allow only saver family instances
continue
if not instance.data.get("publish", True):
# Skip inactive instances
continue
self.log.debug(instance.data["name"])
write_instances.append(instance)
if not write_instances:
raise RuntimeError("No instances found for Deadline submission")
hostVersion = int(context.data["hostVersion"])
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
deadline_user = context.data.get("deadlineUser", getpass.getuser())
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
"BatchName": filename,
# Job name, as seen in Monitor
"Name": filename,
# User, as seen in Monitor
"UserName": deadline_user,
# Use a default submission pool for Nuke
"Pool": "nuke",
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["startFrame"]),
end=int(instance.data["endFrame"])
),
"Comment": comment,
},
"PluginInfo": {
# Input
"FlowFile": filepath,
# Mandatory for Deadline
"Version": str(hostVersion),
# Render in high quality
"HighQuality": True,
# Whether saver output should be checked after rendering
# is complete
"CheckOutput": True,
# Proxy: higher numbers smaller images for faster test renders
# 1 = no proxy quality
"Proxy": 1,
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
# Enable going to rendered frames from Deadline Monitor
for index, instance in enumerate(write_instances):
path = instance.data["path"]
folder, filename = os.path.split(path)
payload["JobInfo"]["OutputDirectory%d" % index] = folder
payload["JobInfo"]["OutputFilename%d" % index] = filename
# Include critical variables with submission
keys = [
# TODO: This won't work if the slaves don't have access to
# these paths, such as if slaves are running Linux and the
# submitter is on Windows.
"PYTHONPATH",
"NUKE_PATH"
# "OFX_PLUGIN_PATH",
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(AVALON_DEADLINE)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store the response for dependent job submission plug-ins
for instance in write_instances:
instance.data["deadlineSubmissionJob"] = response.json()

View file

@ -0,0 +1,2 @@
# creates a backdrop which is published as a separate nuke script
# it is versioned by major version
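# a minimal, hypothetical sketch of such a creator using only plain Nuke API
# calls; the function name, label and padding are illustrative, and the
# actual export/versioning step is not implemented here
import nuke


def create_publish_backdrop(label="nukescript_publish"):
    """Wrap the currently selected nodes in a labelled backdrop.

    The backdrop marks the part of the script that would later be
    exported and published as a separate nuke script.
    """
    nodes = nuke.selectedNodes()
    if not nodes:
        raise RuntimeError("Select the nodes that should be published")

    # bounding box around the selection with some padding
    pad = 80
    xs = [n.xpos() for n in nodes]
    ys = [n.ypos() for n in nodes]
    return nuke.nodes.BackdropNode(
        xpos=min(xs) - pad,
        ypos=min(ys) - pad,
        bdwidth=max(xs) - min(xs) + 2 * pad,
        bdheight=max(ys) - min(ys) + 2 * pad,
        label=label)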

View file

@ -0,0 +1,3 @@
# create a vanilla camera if no camera is selected
# if a camera is selected it will be converted into a containerized object
# it is major-versioned on publish
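# a minimal, hypothetical sketch of the camera creator described above; the
# containerization of an existing camera is left out, only the node handling
# is shown
import nuke


def create_or_reuse_camera():
    """Return a camera node for publishing.

    Reuses a selected Camera node if there is one, otherwise creates a
    vanilla Camera2 node.
    """
    selected = [n for n in nuke.selectedNodes() if "Camera" in n.Class()]
    if selected:
        return selected[0]
    return nuke.createNode("Camera2")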

View file

@ -0,0 +1,8 @@
# create a publishable Read node, usually used to enable version tracking
# also useful for sharing footage across shots or assets
# if Read nodes are selected, they are converted into containers
# if no Read node is selected, a Read node is created and a file browser opens in the shot resource folder
# type movie > mov or image sequence
# type still > matte painting .psd, .tif, .png
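# a minimal, hypothetical sketch of the read node creator described above;
# the extension set and the still/movie classification are illustrative,
# and containerization of selected Read nodes is not implemented here
import os
import nuke

STILL_EXTS = {".psd", ".tif", ".tiff", ".png"}


def create_publish_read(path=None):
    """Create (or reuse) a Read node that can be version tracked."""
    selected = [n for n in nuke.selectedNodes() if n.Class() == "Read"]
    if selected:
        # the real creator would convert these into containers
        return selected

    read = nuke.createNode("Read")
    if path:
        read["file"].setValue(path.replace("\\", "/"))
        ext = os.path.splitext(path)[-1].lower()
        # type still > matte painting, anything else is treated as movie/sequence
        read["label"].setValue("still" if ext in STILL_EXTS else "movie")
    return [read]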

View file

@ -0,0 +1,17 @@
# type: render
# if the script has no render-type node yet, the first one created gets [master] in its name, marking the main script renderer
# colorspace setting from templates
# dataflow setting from templates
# type: mask_render
# created with a shuffle gizmo that separates RGB channels into a DaVinci matte
# colorspace setting from templates
# dataflow setting from templates
# type: prerender
# backdrop with write and read
# colorspace setting from templates
# dataflow setting from templates
# type: geo
# dataflow setting from templates
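# a minimal, hypothetical sketch of how per-type presets could be applied to
# a Write node; the preset values below only stand in for the colorspace and
# dataflow settings that would come from the loaded templates
import nuke

WRITE_PRESETS = {
    "render": {"file_type": "exr", "colorspace": "linear"},
    "prerender": {"file_type": "exr", "colorspace": "linear"},
    "mask_render": {"file_type": "tiff", "colorspace": "linear"},
}


def create_preset_write(write_type, file_path):
    """Create a Write node and apply the preset for *write_type*."""
    preset = WRITE_PRESETS[write_type]
    write = nuke.createNode("Write", "name {}_write".format(write_type))
    write["file"].setValue(file_path.replace("\\", "/"))
    write["file_type"].setValue(preset["file_type"])
    write["colorspace"].setValue(preset["colorspace"])
    write["create_directories"].setValue(True)
    return write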

View file

@ -0,0 +1,149 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
from pype.nuke import (
create_write_node
)
from pype import api as pype
import nuke
log = pype.Logger.getLogger(__name__, "nuke")
def subset_to_families(subset, family, families):
subset_sufx = str(subset).replace(family, "")
new_subset = families + subset_sufx
return "{}.{}".format(family, new_subset)
class CrateWriteRender(avalon.nuke.Creator):
# change this to template preset
preset = "render"
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
family = "{}_write".format(preset)
families = preset
icon = "sign-out"
def __init__(self, *args, **kwargs):
super(CrateWriteRender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family.split("_")[1]
data["families"] = self.families
data.update({k: v for k, v in self.data.items() if k not in data})
self.data = data
def process(self):
self.name = self.data["subset"]
family = self.family.split("_")[0]
node = self.family.split("_")[1]
instance = nuke.toNode(self.data["subset"])
if not instance:
write_data = {
"class": node,
"preset": family,
"avalon": self.data
}
create_write_node(self.data["subset"], write_data)
return
class CrateWritePrerender(avalon.nuke.Creator):
# change this to template preset
preset = "prerender"
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
family = "{}_write".format(preset)
families = preset
icon = "sign-out"
def __init__(self, *args, **kwargs):
super(CrateWritePrerender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family.split("_")[1]
data["families"] = self.families
data.update({k: v for k, v in self.data.items() if k not in data})
self.data = data
def process(self):
self.name = self.data["subset"]
instance = nuke.toNode(self.data["subset"])
family = self.family.split("_")[0]
node = self.family.split("_")[1]
if not instance:
write_data = {
"class": node,
"preset": family,
"avalon": self.data
}
create_write_node(self.data["subset"], write_data)
return
class CrateWriteStill(avalon.nuke.Creator):
# change this to template preset
preset = "still"
name = "WriteStill"
label = "Create Write Still"
hosts = ["nuke"]
family = "{}_write".format(preset)
families = preset
icon = "image"
def __init__(self, *args, **kwargs):
super(CrateWriteStill, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family.split("_")[1]
data["families"] = self.families
data.update({k: v for k, v in self.data.items() if k not in data})
self.data = data
def process(self):
self.name = self.data["subset"]
instance = nuke.toNode(self.data["subset"])
family = self.family.split("_")[0]
node = self.family.split("_")[1]
if not instance:
write_data = {
"frame_range": [nuke.frame(), nuke.frame()],
"class": node,
"preset": family,
"avalon": self.data
}
nuke.createNode("FrameHold", "first_frame {}".format(nuke.frame()))
create_write_node(self.data["subset"], write_data)
return

View file

@ -1,48 +0,0 @@
import os
import avalon.api
import avalon.nuke
import nuke
class CrateWriteExr(avalon.api.Creator):
name = "Write_exr"
label = "Create Write: exr"
hosts = ["nuke"]
family = "write"
icon = "sign-out"
# def __init__(self, *args, **kwargs):
# super(CrateWriteExr, self).__init__(*args, **kwargs)
# self.data.setdefault("subset", "this")
def process(self):
# nuke = getattr(sys.modules["__main__"], "nuke", None)
data = {}
ext = "exr"
# todo: improve method of getting current environment
# todo: pref avalon.Session over os.environ
workdir = os.path.normpath(os.environ["AVALON_WORKDIR"])
filename = "{}.####.exr".format(self.name)
filepath = os.path.join(
workdir,
"render",
ext,
filename
).replace("\\", "/")
with avalon.nuke.viewer_update_and_undo_stop():
w = nuke.createNode(
"Write",
"name {}".format(self.name))
# w.knob('colorspace').setValue()
w.knob('file').setValue(filepath)
w.knob('file_type').setValue(ext)
w.knob('datatype').setValue("16 bit half")
w.knob('compression').setValue("Zip (1 scanline)")
w.knob('create_directories').setValue(True)
w.knob('autocrop').setValue(True)
return data

View file

@ -1,7 +1,7 @@
from avalon import api
class NukeSelectContainers(api.InventoryAction):
class SelectContainers(api.InventoryAction):
label = "Select Containers"
icon = "mouse-pointer"

View file

@ -5,7 +5,7 @@
from avalon import api
class NukeSetFrameRangeLoader(api.Loader):
class SetFrameRangeLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
@ -38,7 +38,7 @@ class NukeSetFrameRangeLoader(api.Loader):
lib.update_frame_range(start, end)
class NukeSetFrameRangeWithHandlesLoader(api.Loader):
class SetFrameRangeWithHandlesLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",

View file

View file

View file

View file

@ -0,0 +1 @@

View file

@ -118,7 +118,7 @@ def loader_shift(node, frame, relative=True):
return int(shift)
class NukeLoadSequence(api.Loader):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["imagesequence"]

View file

@ -0,0 +1 @@
# usually used for matte painting

View file

@ -1,18 +1,18 @@
import pyblish.api
class CollectCurrentFile(pyblish.api.ContextPlugin):
class SelectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Current File"
order = pyblish.api.CollectorOrder
hosts = ["nuke"]
families = ["workfile"]
def process(self, context):
import os
import nuke
current_file = nuke.root().name()
normalised = os.path.normpath(current_file)
context.data["current_file"] = normalised
context.data["currentFile"] = normalised

View file

@ -0,0 +1,58 @@
import os
import nuke
import pyblish.api
from pype.nuke.lib import get_avalon_knob_data
@pyblish.api.log
class CollectNukeInstances(pyblish.api.ContextPlugin):
"""Collect all nodes with Avalon knob."""
order = pyblish.api.CollectorOrder
label = "Collect Instances"
hosts = ["nuke", "nukeassist"]
def process(self, context):
instances = []
# creating instances per write node
for node in nuke.allNodes():
try:
if node["disable"].value():
continue
except Exception:
continue
# get data from avalon knob
avalon_knob_data = get_avalon_knob_data(node)
if not avalon_knob_data:
continue
subset = avalon_knob_data["subset"]
# Create instance
instance = context.create_instance(subset)
instance.add(node)
instance.data.update({
"asset": os.environ["AVALON_ASSET"],
"label": node.name(),
"name": node.name(),
"subset": subset,
"families": [avalon_knob_data["families"]],
"family": avalon_knob_data["family"],
"publish": node.knob("publish").value()
})
self.log.info("collected instance: {}".format(instance.data))
instances.append(instance)
context.data["instances"] = instances
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=self.sort_by_family)
self.log.debug("context: {}".format(context))
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -0,0 +1,90 @@
import os
import nuke
import pyblish.api
import logging
log = logging.getLogger(__name__)
@pyblish.api.log
class CollectNukeWrites(pyblish.api.ContextPlugin):
"""Collect all write nodes."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Writes"
hosts = ["nuke", "nukeassist"]
def process(self, context):
for instance in context.data["instances"]:
self.log.debug("checking instance: {}".format(instance))
node = instance[0]
if node.Class() != "Write":
continue
# Determine defined file type
ext = node["file_type"].value()
# Determine output type
output_type = "img"
if ext == "mov":
output_type = "mov"
# Get frame range
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
if node["use_limit"].getValue():
first_frame = int(node["first"].getValue())
last_frame = int(node["last"].getValue())
# get path
path = nuke.filename(node)
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# Include start and end render frame in label
name = node.name()
label = "{0} ({1}-{2})".format(
name,
int(first_frame),
int(last_frame)
)
# prerendered frames
if not node["render"].value():
families = "prerendered.frames"
collected_frames = os.listdir(output_dir)
self.log.debug("collected_frames: {}".format(collected_frames))
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(collected_frames)
instance.data['transfer'] = False
else:
# dealing with local/farm rendering
if node["render_farm"].value():
families = "{}.farm".format(instance.data["families"][0])
else:
families = "{}.local".format(instance.data["families"][0])
self.log.debug("checking for error: {}".format(label))
instance.data.update({
"path": path,
"outputDir": output_dir,
"ext": ext,
"label": label,
"families": [families],
"firstFrame": first_frame,
"lastFrame": last_frame,
"outputType": output_type,
"stagingDir": output_dir,
})
self.log.debug("instance.data: {}".format(instance.data))
self.log.debug("context: {}".format(context))
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -20,7 +20,7 @@ class ExtractOutputDirectory(pyblish.api.InstancePlugin):
path = instance.data["collection"].format()
if "output_path" in instance.data.keys():
path = instance.data["output_path"]
path = instance.data["path"]
if not path:
return

View file

@ -1,15 +0,0 @@
import nuke
import pyblish.api
class ExtractScriptSave(pyblish.api.InstancePlugin):
""" Saves the script before extraction. """
order = pyblish.api.ExtractorOrder - 0.49
label = "Script Save"
hosts = ["nuke"]
families = ["saver"]
def process(self, instance):
nuke.scriptSave()

View file

@ -0,0 +1,361 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issues
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["prerendered.frames"]
def process(self, instance):
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
# self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"},
projection={"config.template.publish": True})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match "
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
# template_data = {"root": root,
# "project": PROJECT,
# "silo": asset['silo'],
# "asset": ASSET,
# "subset": subset["name"],
# "version": version["name"]}
hierarchy = io.find_one({"type":'asset', "name":ASSET})['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": "prjX"},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"VERSION": version["name"],
"hierarchy": hierarchy}
template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled.publish.path
# if instance.data.get('transfer', True):
# instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled.publish.path
# if instance.data.get('transfer', True):
# dst = src
# instance.data["transfers"].append([src, dst])
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': src},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": PROJECT,
"projectcode": "prjX",
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
# create relative source path for DB
relative_path = os.path.relpath(context.data["currentFile"],
api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["startFrame", "endFrame", "step", "handles"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data

View file

@ -0,0 +1,48 @@
import pyblish.api
import nuke
class NukeRenderLocal(pyblish.api.InstancePlugin):
"""Render write instances of the current Nuke script locally.
Extracts the output of write nodes by executing them over the collected
frame range with nuke.execute().
"""
order = pyblish.api.ExtractorOrder
label = "Render Local"
hosts = ["nuke"]
families = ["render.local", "prerender.local", "still.local"]
def process(self, instance):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
self.log.debug("instance collected: {}".format(instance.data))
first_frame = instance.data.get("firstFrame", None)
last_frame = instance.data.get("lastFrame", None)
node_subset_name = instance.data.get("name", None)
self.log.info("Starting render")
self.log.info("Start frame: {}".format(first_frame))
self.log.info("End frame: {}".format(last_frame))
# Render frames
nuke.execute(
node_subset_name,
int(first_frame),
int(last_frame)
)
# switch to prerendered.frames
instance[0]["render"].setValue(False)
self.log.info('Finished render')

View file

@ -0,0 +1,15 @@
import nuke
import pyblish.api
class ExtractScriptSave(pyblish.api.Extractor):
"""
"""
label = 'Script Save'
order = pyblish.api.Extractor.order - 0.45
hosts = ['nuke']
def process(self, instance):
self.log.info('saving script')
nuke.scriptSave()

View file

@ -0,0 +1,53 @@
import os
import pyblish.api
import clique
@pyblish.api.log
class RepairCollectionAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
files_remove = [os.path.join(context[0].data["outputDir"], f)
for f in context[0].data["files"]]
for f in files_remove:
os.remove(f)
self.log.debug("removing file: {}".format(f))
context[0][0]["render"].setValue(True)
self.log.info("Rendering toggled ON")
class ValidateCollection(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder
# optional = True
families = ['prerendered.frames']
label = "Check prerendered frames"
hosts = ["nuke"]
actions = [RepairCollectionAction]
def process(self, instance):
self.log.debug('instance.data["files"]: {}'.format(instance.data['files']))
collections, remainder = clique.assemble(*instance.data['files'])
self.log.info('collections: {}'.format(str(collections)))
frame_length = instance.data["lastFrame"] \
- instance.data["firstFrame"] + 1
if frame_length != 1:
assert len(collections) == 1, self.log.info(
"There are multiple collections in the folder")
assert collections[0].is_contiguous(), self.log.info("Some frames appear to be missing")
assert not remainder, self.log.info("There are some extra files in folder")
self.log.info('frame_length: {}'.format(frame_length))
self.log.info('len(list(instance.data["files"])): {}'.format(
len(list(instance.data["files"][0]))))
assert len(list(instance.data["files"][0])) == frame_length, self.log.info(
"{} missing frames. Use repair to render all frames".format(__name__))

View file

@ -1,20 +0,0 @@
import os
import pyblish.api
@pyblish.api.log
class ValidatePrerendersOutput(pyblish.api.Validator):
"""Validates that the output directory for the write nodes exists"""
families = ['write.prerender']
hosts = ['nuke']
label = 'Pre-renders output'
def process(self, instance):
path = os.path.dirname(instance[0]['file'].value())
if 'output' not in path:
name = instance[0].name()
msg = 'Output directory for %s is not in an "output" folder.' % name
raise ValueError(msg)

pype/templates.py (new file, 100 lines)
View file

@ -0,0 +1,100 @@
import os
import re
from avalon import io
from app.api import (Templates, Logger, format)
log = Logger.getLogger(__name__,
os.getenv("AVALON_APP", "pype-config"))
def load_data_from_templates():
from . import api
if not any([
api.Dataflow,
api.Anatomy,
api.Colorspace,
api.Metadata
]
):
# base = Templates()
t = Templates(type=["anatomy", "metadata", "dataflow", "colorspace"])
api.Anatomy = t.anatomy
api.Metadata = t.metadata.format()
data = {"metadata": api.Metadata}
api.Dataflow = t.dataflow.format(data)
api.Colorspace = t.colorspace
log.info("Data from templates were Loaded...")
def reset_data_from_templates():
from . import api
api.Dataflow = None
api.Anatomy = None
api.Colorspace = None
api.Metadata = None
log.info("Data from templates were Unloaded...")
def get_version_from_workfile(file):
pattern = re.compile(r"_v([0-9]*)")
try:
v_string = pattern.findall(file)[0]
return v_string
except IndexError:
log.error("templates:get_version_from_workfile: "
"`{}` is missing a version string. "
"Example: `v004`".format(file))
def get_project_code():
return io.find_one({"type": "project"})["data"]["code"]
def get_project_name():
project_name = os.getenv("AVALON_PROJECT", None)
assert project_name, log.error("missing `AVALON_PROJECT` "
"in environment variables")
return project_name
def get_asset():
asset = os.getenv("AVALON_ASSET", None)
assert asset, log.error("missing `AVALON_ASSET` "
"in environment variables")
return asset
def get_task():
task = os.getenv("AVALON_TASK", None)
assert task, log.error("missing `AVALON_TASK` "
"in environment variables")
return task
def get_hiearchy():
hierarchy = io.find_one({
"type": 'asset',
"name": get_asset()}
)['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
return os.path.join(*hierarchy)
def fill_avalon_workdir():
awd = os.getenv("AVALON_WORKDIR", None)
assert awd, log.error("missing `AVALON_WORKDIR` "
"in environment variables")
if "{" not in awd:
return
data = {
"hierarchy": get_hiearchy(),
"task": get_task(),
"asset": get_asset(),
"project": {"name": get_project_name(),
"code": get_project_code()}}
awd_filled = os.path.normpath(format(awd, data))
os.environ["AVALON_WORKDIR"] = awd_filled
log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled))
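# minimal usage sketch of the helpers above (the workfile name is made up);
# get_version_from_workfile only returns the digits following "_v"
if __name__ == "__main__":
    # "_v([0-9]*)" captures "004" from the example name below
    version = get_version_from_workfile("sh010_compositing_v004.nk")
    log.info("workfile version: {}".format(version))

    # resolves any remaining {hierarchy}/{task}/... tokens in AVALON_WORKDIR,
    # assuming the AVALON_* environment variables are already set
    fill_avalon_workdir()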

pype/vendor/backports/__init__.py (vendored, new file, 1 line)
View file

@ -0,0 +1 @@
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

File diff suppressed because it is too large

View file

@ -0,0 +1,171 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import MutableMapping
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from io import open
import sys
try:
from thread import get_ident
except ImportError:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
str = type('str')
def from_none(exc):
"""raise from_none(ValueError('a')) == raise ValueError('a') from None"""
exc.__cause__ = None
exc.__suppress_context__ = True
return exc
# from reprlib 3.2.1
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
# from collections 3.2.1
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
try:
from collections import ChainMap
except ImportError:
ChainMap = _ChainMap
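# minimal usage sketch of the ChainMap resolved above: lookups search the
# maps from left to right, writes only touch the first map
if __name__ == "__main__":
    defaults = {"colorspace": "linear", "file_type": "exr"}
    overrides = {"file_type": "dpx"}
    settings = ChainMap(overrides, defaults)

    assert settings["file_type"] == "dpx"      # found in the first map
    assert settings["colorspace"] == "linear"  # falls through to defaults

    settings["colorspace"] = "sRGB"            # written into `overrides` only
    assert defaults["colorspace"] == "linear"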

View file

@ -0,0 +1,184 @@
from __future__ import absolute_import
import functools
from collections import namedtuple
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
@functools.wraps(functools.update_wrapper)
def update_wrapper(wrapper,
wrapped,
assigned = functools.WRAPPER_ASSIGNMENTS,
updated = functools.WRAPPER_UPDATES):
"""
Patch two bugs in functools.update_wrapper.
"""
# workaround for http://bugs.python.org/issue3445
assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr))
wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated)
# workaround for https://bugs.python.org/issue17482
wrapper.__wrapped__ = wrapped
return wrapper
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes=set([int, str, frozenset, type(None)]),
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
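# minimal usage sketch of the backported lru_cache decorator defined above
if __name__ == "__main__":
    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(20)                  # sub-results are computed once and then cached
    print(fib.cache_info())  # -> CacheInfo(hits=..., misses=..., maxsize=32, currsize=...)
    fib.cache_clear()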

pype/vendor/ftrack_api_old/__init__.py (vendored, new file, 32 lines)
View file

@ -0,0 +1,32 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from ._version import __version__
from .session import Session
def mixin(instance, mixin_class, name=None):
'''Mixin *mixin_class* to *instance*.
*name* can be used to specify new class name. If not specified then one will
be generated.
'''
if name is None:
name = '{0}{1}'.format(
instance.__class__.__name__, mixin_class.__name__
)
# Check mixin class not already present in mro in order to avoid consistent
# method resolution failure.
if mixin_class in instance.__class__.mro():
return
instance.__class__ = type(
name,
(
mixin_class,
instance.__class__
),
{}
)

View file

@ -0,0 +1,656 @@
# :coding: utf-8
# :copyright: Copyright (c) 2016 ftrack
from __future__ import absolute_import
import logging
import json
import sys
import os
import ftrack_api_old
import ftrack_api_old.structure.standard as _standard
from ftrack_api_old.logging import LazyLogMessage as L
scenario_name = 'ftrack.centralized-storage'
class ConfigureCentralizedStorageScenario(object):
'''Configure a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
@property
def storage_scenario(self):
'''Return storage scenario setting.'''
return self.session.query(
'select value from Setting '
'where name is "storage_scenario" and group is "STORAGE"'
).one()
@property
def existing_centralized_storage_configuration(self):
'''Return existing centralized storage configuration.'''
storage_scenario = self.storage_scenario
try:
configuration = json.loads(storage_scenario['value'])
except (ValueError, TypeError):
return None
if not isinstance(configuration, dict):
return None
if configuration.get('scenario') != scenario_name:
return None
return configuration.get('data', {})
def _get_confirmation_text(self, configuration):
'''Return confirmation text from *configuration*.'''
configure_location = configuration.get('configure_location')
select_location = configuration.get('select_location')
select_mount_point = configuration.get('select_mount_point')
if configure_location:
location_text = unicode(
'A new location will be created:\n\n'
'* Label: {location_label}\n'
'* Name: {location_name}\n'
'* Description: {location_description}\n'
).format(**configure_location)
else:
location = self.session.get(
'Location', select_location['location_id']
)
location_text = (
u'You have chosen to use an existing location: {0}'.format(
location['label']
)
)
mount_points_text = unicode(
'* Linux: {linux}\n'
'* OS X: {osx}\n'
'* Windows: {windows}\n\n'
).format(
linux=select_mount_point.get('linux_mount_point') or '*Not set*',
osx=select_mount_point.get('osx_mount_point') or '*Not set*',
windows=select_mount_point.get('windows_mount_point') or '*Not set*'
)
mount_points_not_set = []
if not select_mount_point.get('linux_mount_point'):
mount_points_not_set.append('Linux')
if not select_mount_point.get('osx_mount_point'):
mount_points_not_set.append('OS X')
if not select_mount_point.get('windows_mount_point'):
mount_points_not_set.append('Windows')
if mount_points_not_set:
mount_points_text += unicode(
'Please be aware that this location will not be working on '
'{missing} because the mount points are not set up.'
).format(
missing=' and '.join(mount_points_not_set)
)
text = unicode(
'#Confirm storage setup#\n\n'
'Almost there! Please take a moment to verify the settings you '
'are about to save. You can always come back later and update the '
'configuration.\n'
'##Location##\n\n'
'{location}\n'
'##Mount points##\n\n'
'{mount_points}'
).format(
location=location_text,
mount_points=mount_points_text
)
return text
def configure_scenario(self, event):
'''Configure scenario based on *event* and return form items.'''
steps = (
'select_scenario',
'select_location',
'configure_location',
'select_structure',
'select_mount_point',
'confirm_summary',
'save_configuration'
)
warning_message = ''
values = event['data'].get('values', {})
# Calculate previous step and the next.
previous_step = values.get('step', 'select_scenario')
next_step = steps[steps.index(previous_step) + 1]
state = 'configuring'
self.logger.info(L(
u'Configuring scenario, previous step: {0}, next step: {1}. '
u'Values {2!r}.',
previous_step, next_step, values
))
if 'configuration' in values:
configuration = values.pop('configuration')
else:
configuration = {}
if values:
# Update configuration with values from the previous step.
configuration[previous_step] = values
if previous_step == 'select_location':
values = configuration['select_location']
if values.get('location_id') != 'create_new_location':
location_exists = self.session.query(
'Location where id is "{0}"'.format(
values.get('location_id')
)
).first()
if not location_exists:
next_step = 'select_location'
warning_message = (
'**The selected location does not exist. Please choose '
'one from the dropdown or create a new one.**'
)
if next_step == 'select_location':
try:
location_id = (
self.existing_centralized_storage_configuration['location_id']
)
except (KeyError, TypeError):
location_id = None
options = [{
'label': 'Create new location',
'value': 'create_new_location'
}]
for location in self.session.query(
'select name, label, description from Location'
):
if location['name'] not in (
'ftrack.origin', 'ftrack.unmanaged', 'ftrack.connect',
'ftrack.server', 'ftrack.review'
):
options.append({
'label': u'{label} ({name})'.format(
label=location['label'], name=location['name']
),
'description': location['description'],
'value': location['id']
})
warning = ''
if location_id is not None:
# If there is already a location configured we must make the
# user aware that changing the location may be problematic.
warning = (
'\n\n**Be careful if you switch to another location '
'for an existing storage scenario. Components that have '
'already been published to the previous location will be '
'made unavailable for common use.**'
)
default_value = location_id
elif location_id is None and len(options) == 1:
# No location configured and no existing locations to use.
default_value = 'create_new_location'
else:
# There are existing locations to choose from but non of them
# are currently active in the centralized storage scenario.
default_value = None
items = [{
'type': 'label',
'value': (
'#Select location#\n'
'Choose an already existing location or create a new one '
'to represent your centralized storage. {0}'.format(
warning
)
)
}, {
'type': 'enumerator',
'label': 'Location',
'name': 'location_id',
'value': default_value,
'data': options
}]
default_location_name = 'studio.central-storage-location'
default_location_label = 'Studio location'
default_location_description = (
'The studio central location where all components are '
'stored.'
)
if previous_step == 'configure_location':
configure_location = configuration.get(
'configure_location'
)
if configure_location:
try:
existing_location = self.session.query(
u'Location where name is "{0}"'.format(
configure_location.get('location_name')
)
).first()
except UnicodeEncodeError:
next_step = 'configure_location'
warning_message += (
'**The location name contains non-ascii characters. '
'Please change the name and try again.**'
)
values = configuration['select_location']
else:
if existing_location:
next_step = 'configure_location'
warning_message += (
u'**There is already a location named {0}. '
u'Please change the name and try again.**'.format(
configure_location.get('location_name')
)
)
values = configuration['select_location']
if (
not configure_location.get('location_name') or
not configure_location.get('location_label') or
not configure_location.get('location_description')
):
next_step = 'configure_location'
warning_message += (
'**Location name, label and description cannot '
'be empty.**'
)
values = configuration['select_location']
if next_step == 'configure_location':
# Populate form with previous configuration.
default_location_label = configure_location['location_label']
default_location_name = configure_location['location_name']
default_location_description = (
configure_location['location_description']
)
if next_step == 'configure_location':
if values.get('location_id') == 'create_new_location':
# Add options to create a new location.
items = [{
'type': 'label',
'value': (
'#Create location#\n'
'Here you will create a new location to be used '
'with your new Storage scenario. For your '
'convenience we have already filled in some default '
'values. If this is the first time you are configuring '
'a storage scenario in ftrack we recommend that you '
'stick with these settings.'
)
}, {
'label': 'Label',
'name': 'location_label',
'value': default_location_label,
'type': 'text'
}, {
'label': 'Name',
'name': 'location_name',
'value': default_location_name,
'type': 'text'
}, {
'label': 'Description',
'name': 'location_description',
'value': default_location_description,
'type': 'text'
}]
else:
# The user selected an existing location. Move on to next
# step.
next_step = 'select_mount_point'
if next_step == 'select_structure':
# There is only one structure to choose from, go to next step.
next_step = 'select_mount_point'
# items = [
# {
# 'type': 'label',
# 'value': (
# '#Select structure#\n'
# 'Select which structure to use with your location. '
# 'The structure is used to generate the filesystem '
# 'path for components that are added to this location.'
# )
# },
# {
# 'type': 'enumerator',
# 'label': 'Structure',
# 'name': 'structure_id',
# 'value': 'standard',
# 'data': [{
# 'label': 'Standard',
# 'value': 'standard',
# 'description': (
# 'The Standard structure uses the names in your '
# 'project structure to determine the path.'
# )
# }]
# }
# ]
if next_step == 'select_mount_point':
try:
mount_points = (
self.existing_centralized_storage_configuration['accessor']['mount_points']
)
except (KeyError, TypeError):
mount_points = dict()
items = [
{
'value': (
'#Mount points#\n'
'Set mount points for your centralized storage '
'location. For the location to work as expected each '
'platform that you intend to use must have the '
'corresponding mount point set and the storage must '
'be accessible. If not set correctly files will not be '
'saved or read.'
),
'type': 'label'
}, {
'type': 'text',
'label': 'Linux',
'name': 'linux_mount_point',
'empty_text': 'E.g. /usr/mnt/MyStorage ...',
'value': mount_points.get('linux', '')
}, {
'type': 'text',
'label': 'OS X',
'name': 'osx_mount_point',
'empty_text': 'E.g. /Volumes/MyStorage ...',
'value': mount_points.get('osx', '')
}, {
'type': 'text',
'label': 'Windows',
'name': 'windows_mount_point',
'empty_text': 'E.g. \\\\MyStorage ...',
'value': mount_points.get('windows', '')
}
]
if next_step == 'confirm_summary':
items = [{
'type': 'label',
'value': self._get_confirmation_text(configuration)
}]
state = 'confirm'
if next_step == 'save_configuration':
mount_points = configuration['select_mount_point']
select_location = configuration['select_location']
if select_location['location_id'] == 'create_new_location':
configure_location = configuration['configure_location']
location = self.session.create(
'Location',
{
'name': configure_location['location_name'],
'label': configure_location['location_label'],
'description': (
configure_location['location_description']
)
}
)
else:
location = self.session.query(
'Location where id is "{0}"'.format(
select_location['location_id']
)
).one()
setting_value = json.dumps({
'scenario': scenario_name,
'data': {
'location_id': location['id'],
'location_name': location['name'],
'accessor': {
'mount_points': {
'linux': mount_points['linux_mount_point'],
'osx': mount_points['osx_mount_point'],
'windows': mount_points['windows_mount_point']
}
}
}
})
self.storage_scenario['value'] = setting_value
self.session.commit()
# Broadcast an event that storage scenario has been configured.
event = ftrack_api_old.event.base.Event(
topic='ftrack.storage-scenario.configure-done'
)
self.session.event_hub.publish(event)
items = [{
'type': 'label',
'value': (
'#Done!#\n'
'Your storage scenario is now configured and ready '
'to use. **Note that you may have to restart Connect and '
'other applications to start using it.**'
)
}]
state = 'done'
if warning_message:
items.insert(0, {
'type': 'label',
'value': warning_message
})
items.append({
'type': 'hidden',
'value': configuration,
'name': 'configuration'
})
items.append({
'type': 'hidden',
'value': next_step,
'name': 'step'
})
return {
'items': items,
'state': state
}
def discover_centralized_scenario(self, event):
'''Return action discover dictionary for *event*.'''
return {
'id': scenario_name,
'name': 'Centralized storage scenario',
'description': (
'(Recommended) centralized storage scenario where all files '
'are kept on a storage that is mounted and available to '
'everyone in the studio.'
)
}
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
#: TODO: Move these to a separate function.
session.event_hub.subscribe(
unicode(
'topic=ftrack.storage-scenario.discover '
'and source.user.username="{0}"'
).format(
session.api_user
),
self.discover_centralized_scenario
)
session.event_hub.subscribe(
unicode(
'topic=ftrack.storage-scenario.configure '
'and data.scenario_id="{0}" '
'and source.user.username="{1}"'
).format(
scenario_name,
session.api_user
),
self.configure_scenario
)
class ActivateCentralizedStorageScenario(object):
'''Activate a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
def activate(self, event):
'''Activate scenario in *event*.'''
storage_scenario = event['data']['storage_scenario']
try:
location_data = storage_scenario['data']
location_name = location_data['location_name']
location_id = location_data['location_id']
mount_points = location_data['accessor']['mount_points']
except KeyError:
error_message = (
'Unable to read storage scenario data.'
)
self.logger.error(L(error_message))
raise ftrack_api_old.exception.LocationError(
'Unable to configure location based on scenario.'
)
else:
location = self.session.create(
'Location',
data=dict(
name=location_name,
id=location_id
),
reconstructing=True
)
if sys.platform == 'darwin':
prefix = mount_points['osx']
elif sys.platform == 'linux2':
prefix = mount_points['linux']
elif sys.platform == 'win32':
prefix = mount_points['windows']
else:
raise ftrack_api_old.exception.LocationError(
(
'Unable to find accessor prefix for platform {0}.'
).format(sys.platform)
)
location.accessor = ftrack_api_old.accessor.disk.DiskAccessor(
prefix=prefix
)
location.structure = _standard.StandardStructure()
location.priority = 1
self.logger.info(L(
u'Storage scenario activated. Configured {0!r} from '
u'{1!r}',
location, storage_scenario
))
def _verify_startup(self, event):
'''Verify the storage scenario configuration.'''
storage_scenario = event['data']['storage_scenario']
location_data = storage_scenario['data']
mount_points = location_data['accessor']['mount_points']
prefix = None
if sys.platform == 'darwin':
prefix = mount_points['osx']
elif sys.platform == 'linux2':
prefix = mount_points['linux']
elif sys.platform == 'win32':
prefix = mount_points['windows']
if not prefix:
return (
u'The storage scenario has not been configured for your '
u'operating system. ftrack may not be able to '
u'store and track files correctly.'
)
if not os.path.isdir(prefix):
return (
unicode(
'The path {0} does not exist. ftrack may not be able to '
'store and track files correctly. \n\nIf the storage is '
'newly set up, you may want to create the necessary folder '
'structures. If the storage is a network drive, you should '
'make sure that it is mounted correctly.'
).format(prefix)
)
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
session.event_hub.subscribe(
(
'topic=ftrack.storage-scenario.activate '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self.activate
)
# Listen to the verify-startup event from ftrack Connect so that we can
# respond with a message if something about this scenario is not working
# correctly and the user should be notified.
self.session.event_hub.subscribe(
(
'topic=ftrack.connect.verify-startup '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self._verify_startup
)
def register(session):
'''Register storage scenario.'''
scenario = ActivateCentralizedStorageScenario()
scenario.register(session)
def register_configuration(session):
'''Register storage scenario configuration.'''
scenario = ConfigureCentralizedStorageScenario()
scenario.register(session)
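# Standalone wiring sketch, not part of the vendored module: how the two
# entry points above might be registered against an API session outside of
# ftrack Connect. The server URL and credentials are illustrative
# placeholders.
if __name__ == '__main__':
    import ftrack_api_old

    _session = ftrack_api_old.Session(
        server_url='https://example.ftrackapp.com',
        api_user='api.user',
        api_key='API_KEY'
    )

    register(_session)                # activation side
    register_configuration(_session)  # configuration UI side
    _session.event_hub.wait()         # block and keep processing events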

View file

@ -0,0 +1,534 @@
# pragma: no cover
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though this is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
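# Quick illustration, not part of the vendored module, of the Windows path
# helpers defined above; the expected printed output is shown in comments.
if __name__ == '__main__':
    print splitdrive('c:\\foo\\bar')       # ('c:', '\\foo\\bar')
    print join('c:\\', 'dir', 'file.txt')  # c:\dir\file.txt
    print normpath('a/./b//c/../d')        # a\b\d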

View file

@ -0,0 +1 @@
__version__ = '1.3.3'

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,124 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
import abc
import ftrack_api_old.exception
class Accessor(object):
'''Provide data access to a location.
A location represents a specific storage, but access to that storage may
vary. For example, both local filesystem and FTP access may be possible for
the same storage. An accessor implements these different ways of accessing
the same data location.
As different accessors may access the same location, only part of a data
path that is commonly understood may be stored in the database. The format
of this path should be a contract between the accessors that require access
to the same location and is left as an implementation detail. As such, this
system provides no guarantee that two different accessors can provide access
to the same location, though this is a clear goal. The path stored centrally
is referred to as the **resource identifier** and should be used when
calling any of the accessor methods that accept a *resource_identifier*
argument.
'''
__metaclass__ = abc.ABCMeta
def __init__(self):
'''Initialise location accessor.'''
super(Accessor, self).__init__()
@abc.abstractmethod
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.
Each entry in the returned list should be a valid resource identifier.
Raise :exc:`~ftrack_api_old.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist or
:exc:`~ftrack_api_old.exception.AccessorResourceInvalidError` if
*resource_identifier* is not a container.
'''
@abc.abstractmethod
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
@abc.abstractmethod
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
@abc.abstractmethod
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
@abc.abstractmethod
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
@abc.abstractmethod
def open(self, resource_identifier, mode='rb'):
'''Return :class:`~ftrack_api_old.data.Data` for *resource_identifier*.'''
@abc.abstractmethod
def remove(self, resource_identifier):
'''Remove *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist.
'''
@abc.abstractmethod
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.
If *recursive* is True, also make any intermediate containers.
Should silently ignore existing containers and not recreate them.
'''
@abc.abstractmethod
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorParentResourceNotFoundError`
if container of *resource_identifier* could not be determined.
'''
def remove_container(self, resource_identifier): # pragma: no cover
'''Remove container at *resource_identifier*.'''
return self.remove(resource_identifier)
def get_filesystem_path(self, resource_identifier): # pragma: no cover
'''Return filesystem path for *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorFilesystemPathError` if
filesystem path could not be determined from *resource_identifier* or
:exc:`~ftrack_api_old.exception.AccessorUnsupportedOperationError` if
retrieving filesystem paths is not supported by this accessor.
'''
raise ftrack_api_old.exception.AccessorUnsupportedOperationError(
'get_filesystem_path', resource_identifier=resource_identifier
)
def get_url(self, resource_identifier):
'''Return URL for *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorFilesystemPathError` if
URL could not be determined from *resource_identifier* or
:exc:`~ftrack_api_old.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by this accessor.
'''
raise ftrack_api_old.exception.AccessorUnsupportedOperationError(
'get_url', resource_identifier=resource_identifier
)
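# Illustrative sketch only, not part of the vendored module: the minimum
# surface a concrete subclass has to provide. A trivial in-memory accessor
# keying byte strings by resource identifier; error handling is intentionally
# simplified compared to a real implementation such as DiskAccessor.
class _InMemoryAccessor(Accessor):
    '''Toy accessor storing data in a plain dictionary.'''

    def __init__(self):
        super(_InMemoryAccessor, self).__init__()
        self._store = {}

    def list(self, resource_identifier):
        return [
            key for key in self._store
            if key.startswith(resource_identifier)
        ]

    def exists(self, resource_identifier):
        return resource_identifier in self._store

    def is_file(self, resource_identifier):
        return self.exists(resource_identifier)

    def is_container(self, resource_identifier):
        return False

    def is_sequence(self, resource_identifier):
        return False

    def open(self, resource_identifier, mode='rb'):
        raise ftrack_api_old.exception.AccessorUnsupportedOperationError(
            'open', resource_identifier=resource_identifier
        )

    def remove(self, resource_identifier):
        self._store.pop(resource_identifier)

    def make_container(self, resource_identifier, recursive=True):
        pass

    def get_container(self, resource_identifier):
        return ''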

View file

@ -0,0 +1,250 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
import os
import sys
import errno
import contextlib
import ftrack_api_old._python_ntpath as ntpath
import ftrack_api_old.accessor.base
import ftrack_api_old.data
from ftrack_api_old.exception import (
AccessorFilesystemPathError,
AccessorUnsupportedOperationError,
AccessorResourceNotFoundError,
AccessorOperationFailedError,
AccessorPermissionDeniedError,
AccessorResourceInvalidError,
AccessorContainerNotEmptyError,
AccessorParentResourceNotFoundError
)
class DiskAccessor(ftrack_api_old.accessor.base.Accessor):
'''Provide disk access to a location.
Expect resource identifiers to refer to relative filesystem paths.
'''
def __init__(self, prefix, **kw):
'''Initialise location accessor.
*prefix* specifies the base folder for the disk based structure and
will be prepended to any path. It should be specified in the syntax of
the current OS.
'''
if prefix:
prefix = os.path.expanduser(os.path.expandvars(prefix))
prefix = os.path.abspath(prefix)
self.prefix = prefix
super(DiskAccessor, self).__init__(**kw)
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.
Each entry in the returned list should be a valid resource identifier.
Raise :exc:`~ftrack_api_old.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist or
:exc:`~ftrack_api_old.exception.AccessorResourceInvalidError` if
*resource_identifier* is not a container.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='list', resource_identifier=resource_identifier
):
listing = []
for entry in os.listdir(filesystem_path):
listing.append(os.path.join(resource_identifier, entry))
return listing
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.exists(filesystem_path)
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.isfile(filesystem_path)
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.isdir(filesystem_path)
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
raise AccessorUnsupportedOperationError(operation='is_sequence')
def open(self, resource_identifier, mode='rb'):
'''Return :class:`~ftrack_api_old.Data` for *resource_identifier*.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='open', resource_identifier=resource_identifier
):
data = ftrack_api_old.data.File(filesystem_path, mode)
return data
def remove(self, resource_identifier):
'''Remove *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
if self.is_file(resource_identifier):
with error_handler(
operation='remove', resource_identifier=resource_identifier
):
os.remove(filesystem_path)
elif self.is_container(resource_identifier):
with error_handler(
operation='remove', resource_identifier=resource_identifier
):
os.rmdir(filesystem_path)
else:
raise AccessorResourceNotFoundError(
resource_identifier=resource_identifier
)
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.
If *recursive* is True, also make any intermediate containers.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='makeContainer', resource_identifier=resource_identifier
):
try:
if recursive:
os.makedirs(filesystem_path)
else:
try:
os.mkdir(filesystem_path)
except OSError as error:
if error.errno == errno.ENOENT:
raise AccessorParentResourceNotFoundError(
resource_identifier=resource_identifier
)
else:
raise
except OSError as error:
if error.errno != errno.EEXIST:
raise
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.
Raise :exc:`~ftrack_api_old.exception.AccessorParentResourceNotFoundError` if
container of *resource_identifier* could not be determined.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
container = os.path.dirname(filesystem_path)
if self.prefix:
if not container.startswith(self.prefix):
raise AccessorParentResourceNotFoundError(
resource_identifier=resource_identifier,
message='Could not determine container for '
'{resource_identifier} as container falls outside '
'of configured prefix.'
)
# Convert container filesystem path into resource identifier.
container = container[len(self.prefix):]
if ntpath.isabs(container):
# Ensure that resulting path is relative by stripping any
# leftover prefixed slashes from string.
# E.g. If prefix was '/tmp' and path was '/tmp/foo/bar' the
# result will be 'foo/bar'.
container = container.lstrip('\\/')
return container
def get_filesystem_path(self, resource_identifier):
'''Return filesystem path for *resource_identifier*.
For example::
>>> accessor = DiskAccessor(prefix='/mountpoint')
>>> print accessor.get_filesystem_path('test.txt')
/mountpoint/test.txt
>>> print accessor.get_filesystem_path('/mountpoint/test.txt')
/mountpoint/test.txt
Raise :exc:`ftrack_api_old.exception.AccessorFilesystemPathError` if filesystem
path could not be determined from *resource_identifier*.
'''
filesystem_path = resource_identifier
if filesystem_path:
filesystem_path = os.path.normpath(filesystem_path)
if self.prefix:
if not os.path.isabs(filesystem_path):
filesystem_path = os.path.normpath(
os.path.join(self.prefix, filesystem_path)
)
if not filesystem_path.startswith(self.prefix):
raise AccessorFilesystemPathError(
resource_identifier=resource_identifier,
message='Could not determine access path for '
'resource_identifier outside of configured prefix: '
'{resource_identifier}.'
)
return filesystem_path
@contextlib.contextmanager
def error_handler(**kw):
'''Conform raised OSError/IOError exception to appropriate ftrack error.'''
try:
yield
except (OSError, IOError) as error:
(exception_type, exception_value, traceback) = sys.exc_info()
kw.setdefault('error', error)
error_code = getattr(error, 'errno')
if not error_code:
raise AccessorOperationFailedError(**kw), None, traceback
if error_code == errno.ENOENT:
raise AccessorResourceNotFoundError(**kw), None, traceback
elif error_code == errno.EPERM:
raise AccessorPermissionDeniedError(**kw), None, traceback
elif error_code == errno.ENOTEMPTY:
raise AccessorContainerNotEmptyError(**kw), None, traceback
elif error_code in (errno.ENOTDIR, errno.EISDIR, errno.EINVAL):
raise AccessorResourceInvalidError(**kw), None, traceback
else:
raise AccessorOperationFailedError(**kw), None, traceback
except Exception:
raise
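# Usage sketch, not part of the vendored module; the prefix and resource
# identifiers below are illustrative placeholders.
if __name__ == '__main__':
    accessor = DiskAccessor(prefix='/mnt/projects')

    # Relative identifiers are resolved against the configured prefix.
    print accessor.get_filesystem_path('shots/sh010/plate.exr')
    # /mnt/projects/shots/sh010/plate.exr

    # Create a publish folder if it does not already exist.
    if not accessor.is_container('shots/sh010'):
        accessor.make_container('shots/sh010')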

View file

@ -0,0 +1,240 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import os
import hashlib
import base64
import json
import requests
from .base import Accessor
from ..data import String
import ftrack_api_old.exception
import ftrack_api_old.symbol
class ServerFile(String):
'''Representation of a server file.'''
def __init__(self, resource_identifier, session, mode='rb'):
'''Initialise file.'''
self.mode = mode
self.resource_identifier = resource_identifier
self._session = session
self._has_read = False
super(ServerFile, self).__init__()
def flush(self):
'''Flush all changes.'''
super(ServerFile, self).flush()
if self.mode == 'wb':
self._write()
def read(self, limit=None):
'''Read file.'''
if not self._has_read:
self._read()
self._has_read = True
return super(ServerFile, self).read(limit)
def _read(self):
'''Read all remote content from key into wrapped_file.'''
position = self.tell()
self.seek(0)
response = requests.get(
'{0}/component/get'.format(self._session.server_url),
params={
'id': self.resource_identifier,
'username': self._session.api_user,
'apiKey': self._session.api_key
},
stream=True
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
raise ftrack_api_old.exception.AccessorOperationFailedError(
'Failed to read data: {0}.'.format(error)
)
for block in response.iter_content(ftrack_api_old.symbol.CHUNK_SIZE):
self.wrapped_file.write(block)
self.flush()
self.seek(position)
def _write(self):
'''Write current data to remote key.'''
position = self.tell()
self.seek(0)
# Retrieve component from cache to construct a filename.
component = self._session.get('FileComponent', self.resource_identifier)
if not component:
raise ftrack_api_old.exception.AccessorOperationFailedError(
'Unable to retrieve component with id: {0}.'.format(
self.resource_identifier
)
)
# Construct a name from component name and file_type.
name = component['name']
if component['file_type']:
name = u'{0}.{1}'.format(
name,
component['file_type'].lstrip('.')
)
try:
metadata = self._session.get_upload_metadata(
component_id=self.resource_identifier,
file_name=name,
file_size=self._get_size(),
checksum=self._compute_checksum()
)
except Exception as error:
raise ftrack_api_old.exception.AccessorOperationFailedError(
'Failed to get put metadata: {0}.'.format(error)
)
# Ensure at beginning of file before put.
self.seek(0)
# Put the file based on the metadata.
response = requests.put(
metadata['url'],
data=self.wrapped_file,
headers=metadata['headers']
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
raise ftrack_api_old.exception.AccessorOperationFailedError(
'Failed to put file to server: {0}.'.format(error)
)
self.seek(position)
def _get_size(self):
'''Return size of file in bytes.'''
position = self.tell()
self.seek(0, os.SEEK_END)
length = self.tell()
self.seek(position)
return length
def _compute_checksum(self):
'''Return checksum for file.'''
fp = self.wrapped_file
buf_size = ftrack_api_old.symbol.CHUNK_SIZE
hash_obj = hashlib.md5()
spos = fp.tell()
s = fp.read(buf_size)
while s:
hash_obj.update(s)
s = fp.read(buf_size)
base64_digest = base64.encodestring(hash_obj.digest())
if base64_digest[-1] == '\n':
base64_digest = base64_digest[0:-1]
fp.seek(spos)
return base64_digest
class _ServerAccessor(Accessor):
'''Provide server location access.'''
def __init__(self, session, **kw):
'''Initialise location accessor.'''
super(_ServerAccessor, self).__init__(**kw)
self._session = session
def open(self, resource_identifier, mode='rb'):
'''Return :py:class:`~ftrack_api_old.Data` for *resource_identifier*.'''
return ServerFile(resource_identifier, session=self._session, mode=mode)
def remove(self, resourceIdentifier):
'''Remove *resourceIdentifier*.'''
response = requests.get(
'{0}/component/remove'.format(self._session.server_url),
params={
'id': resourceIdentifier,
'username': self._session.api_user,
'apiKey': self._session.api_key
}
)
if response.status_code != 200:
raise ftrack_api_old.exception.AccessorOperationFailedError(
'Failed to remove file.'
)
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.'''
return None
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.'''
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.'''
raise NotImplementedError()
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
return False
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
raise NotImplementedError()
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
raise NotImplementedError()
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
raise NotImplementedError()
def get_url(self, resource_identifier):
'''Return url for *resource_identifier*.'''
url_string = (
u'{url}/component/get?id={id}&username={username}'
u'&apiKey={apiKey}'
)
return url_string.format(
url=self._session.server_url,
id=resource_identifier,
username=self._session.api_user,
apiKey=self._session.api_key
)
def get_thumbnail_url(self, resource_identifier, size=None):
'''Return thumbnail url for *resource_identifier*.
Optionally, specify *size* to constrain the downscaled image to size
x size pixels.
'''
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self._session.server_url,
id=resource_identifier,
username=self._session.api_user,
apiKey=self._session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
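# Usage sketch, not part of the vendored module: the accessor is normally
# created internally for the builtin server location; the session is assumed
# to be an existing API session and the component id is a placeholder.
def _example_server_accessor(session):
    accessor = _ServerAccessor(session)
    component_id = '00000000-0000-0000-0000-000000000000'

    print accessor.get_url(component_id)
    print accessor.get_thumbnail_url(component_id, size=300)

    # Reading streams the component from the ftrack server on first read().
    data = accessor.open(component_id, mode='rb')
    content = data.read()
    data.close()
    return content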

697
pype/vendor/ftrack_api_old/attribute.py vendored Normal file
View file

@ -0,0 +1,697 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import collections
import copy
import logging
import functools
import ftrack_api_old.symbol
import ftrack_api_old.exception
import ftrack_api_old.collection
import ftrack_api_old.inspection
import ftrack_api_old.operation
logger = logging.getLogger(
__name__
)
def merge_references(function):
'''Decorator to handle merging of references / collections.'''
@functools.wraps(function)
def get_value(attribute, entity):
'''Merge the attribute with the local cache.'''
if attribute.name not in entity._inflated:
# Only merge on first access to avoid
# inflating them multiple times.
logger.debug(
'Merging potential new data into attached '
'entity for attribute {0}.'.format(
attribute.name
)
)
# Local attributes.
local_value = attribute.get_local_value(entity)
if isinstance(
local_value,
(
ftrack_api_old.entity.base.Entity,
ftrack_api_old.collection.Collection,
ftrack_api_old.collection.MappedCollectionProxy
)
):
logger.debug(
'Merging local value for attribute {0}.'.format(attribute)
)
merged_local_value = entity.session._merge(
local_value, merged=dict()
)
if merged_local_value is not local_value:
with entity.session.operation_recording(False):
attribute.set_local_value(entity, merged_local_value)
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api_old.entity.base.Entity,
ftrack_api_old.collection.Collection,
ftrack_api_old.collection.MappedCollectionProxy
)
):
logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
merged_remote_value = entity.session._merge(
remote_value, merged=dict()
)
if merged_remote_value is not remote_value:
attribute.set_remote_value(entity, merged_remote_value)
entity._inflated.add(
attribute.name
)
return function(
attribute, entity
)
return get_value
class Attributes(object):
'''Collection of properties accessible by name.'''
def __init__(self, attributes=None):
super(Attributes, self).__init__()
self._data = dict()
if attributes is not None:
for attribute in attributes:
self.add(attribute)
def add(self, attribute):
'''Add *attribute*.'''
existing = self._data.get(attribute.name, None)
if existing:
raise ftrack_api_old.exception.NotUniqueError(
'Attribute with name {0} already added as {1}'
.format(attribute.name, existing)
)
self._data[attribute.name] = attribute
def remove(self, attribute):
'''Remove attribute.'''
self._data.pop(attribute.name)
def get(self, name):
'''Return attribute by *name*.
If no attribute matches *name* then return None.
'''
return self._data.get(name, None)
def keys(self):
'''Return list of attribute names.'''
return self._data.keys()
def __contains__(self, item):
'''Return whether *item* present.'''
if not isinstance(item, Attribute):
return False
return item.name in self._data
def __iter__(self):
'''Return iterator over attributes.'''
return self._data.itervalues()
def __len__(self):
'''Return count of attributes.'''
return len(self._data)
class Attribute(object):
'''A name and value pair persisted remotely.'''
def __init__(
self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True
):
'''Initialise attribute with *name*.
*default_value* represents the default value for the attribute. It may
be a callable. It is not used within the attribute when providing
values, but instead exists for other parts of the system to reference.
If *mutable* is set to False then the local value of the attribute on an
entity can only be set when both the existing local and remote values
are :attr:`ftrack_api_old.symbol.NOT_SET`. The exception to this is when the
target value is also :attr:`ftrack_api_old.symbol.NOT_SET`.
'''
super(Attribute, self).__init__()
self._name = name
self._mutable = mutable
self.default_value = default_value
self._local_key = 'local'
self._remote_key = 'remote'
def __repr__(self):
'''Return representation of entity.'''
return '<{0}.{1}({2}) object at {3}>'.format(
self.__module__,
self.__class__.__name__,
self.name,
id(self)
)
def get_entity_storage(self, entity):
'''Return attribute storage on *entity* creating if missing.'''
storage_key = '_ftrack_attribute_storage'
storage = getattr(entity, storage_key, None)
if storage is None:
storage = collections.defaultdict(
lambda:
{
self._local_key: ftrack_api_old.symbol.NOT_SET,
self._remote_key: ftrack_api_old.symbol.NOT_SET
}
)
setattr(entity, storage_key, storage)
return storage
@property
def name(self):
'''Return name.'''
return self._name
@property
def mutable(self):
'''Return whether attribute is mutable.'''
return self._mutable
def get_value(self, entity):
'''Return current value for *entity*.
If a value was set locally then return it, otherwise return last known
remote value. If no remote value yet retrieved, make a request for it
via the session and block until available.
'''
value = self.get_local_value(entity)
if value is not ftrack_api_old.symbol.NOT_SET:
return value
value = self.get_remote_value(entity)
if value is not ftrack_api_old.symbol.NOT_SET:
return value
if not entity.session.auto_populate:
return value
self.populate_remote_value(entity)
return self.get_remote_value(entity)
def get_local_value(self, entity):
'''Return locally set value for *entity*.'''
storage = self.get_entity_storage(entity)
return storage[self.name][self._local_key]
def get_remote_value(self, entity):
'''Return remote value for *entity*.
.. note::
Only return locally stored remote value, do not fetch from remote.
'''
storage = self.get_entity_storage(entity)
return storage[self.name][self._remote_key]
def set_local_value(self, entity, value):
'''Set local *value* for *entity*.'''
if (
not self.mutable
and self.is_set(entity)
and value is not ftrack_api_old.symbol.NOT_SET
):
raise ftrack_api_old.exception.ImmutableAttributeError(self)
old_value = self.get_local_value(entity)
storage = self.get_entity_storage(entity)
storage[self.name][self._local_key] = value
# Record operation.
if entity.session.record_operations:
entity.session.recorded_operations.push(
ftrack_api_old.operation.UpdateEntityOperation(
entity.entity_type,
ftrack_api_old.inspection.primary_key(entity),
self.name,
old_value,
value
)
)
def set_remote_value(self, entity, value):
'''Set remote *value*.
.. note::
Only set locally stored remote value, do not persist to remote.
'''
storage = self.get_entity_storage(entity)
storage[self.name][self._remote_key] = value
def populate_remote_value(self, entity):
'''Populate remote value for *entity*.'''
entity.session.populate([entity], self.name)
def is_modified(self, entity):
'''Return whether local value set and differs from remote.
.. note::
Will not fetch remote value so may report True even when values
are the same on the remote.
'''
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
return (
local_value is not ftrack_api_old.symbol.NOT_SET
and local_value != remote_value
)
def is_set(self, entity):
'''Return whether a value is set for *entity*.'''
return any([
self.get_local_value(entity) is not ftrack_api_old.symbol.NOT_SET,
self.get_remote_value(entity) is not ftrack_api_old.symbol.NOT_SET
])
class ScalarAttribute(Attribute):
'''Represent a scalar value.'''
def __init__(self, name, data_type, **kw):
'''Initialise property.'''
super(ScalarAttribute, self).__init__(name, **kw)
self.data_type = data_type
class ReferenceAttribute(Attribute):
'''Reference another entity.'''
def __init__(self, name, entity_type, **kw):
'''Initialise property.'''
super(ReferenceAttribute, self).__init__(name, **kw)
self.entity_type = entity_type
def populate_remote_value(self, entity):
'''Populate remote value for *entity*.
As attribute references another entity, use that entity's configured
default projections to auto populate useful attributes when loading.
'''
reference_entity_type = entity.session.types[self.entity_type]
default_projections = reference_entity_type.default_projections
projections = []
if default_projections:
for projection in default_projections:
projections.append('{0}.{1}'.format(self.name, projection))
else:
projections.append(self.name)
entity.session.populate([entity], ', '.join(projections))
def is_modified(self, entity):
'''Return whether a local value has been set and differs from remote.
.. note::
Will not fetch remote value so may report True even when values
are the same on the remote.
'''
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
if local_value is ftrack_api_old.symbol.NOT_SET:
return False
if remote_value is ftrack_api_old.symbol.NOT_SET:
return True
if (
ftrack_api_old.inspection.identity(local_value)
!= ftrack_api_old.inspection.identity(remote_value)
):
return True
return False
@merge_references
def get_value(self, entity):
return super(ReferenceAttribute, self).get_value(
entity
)
class AbstractCollectionAttribute(Attribute):
'''Base class for collection attributes.'''
#: Collection class used by attribute.
collection_class = None
@merge_references
def get_value(self, entity):
'''Return current value for *entity*.
If a value was set locally then return it, otherwise return last known
remote value. If no remote value yet retrieved, make a request for it
via the session and block until available.
.. note::
As value is a collection that is mutable, will transfer a remote
value into the local value on access if no local value currently
set.
'''
super(AbstractCollectionAttribute, self).get_value(entity)
# Conditionally, copy remote value into local value so that it can be
# mutated without side effects.
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
if (
local_value is ftrack_api_old.symbol.NOT_SET
and isinstance(remote_value, self.collection_class)
):
try:
with entity.session.operation_recording(False):
self.set_local_value(entity, copy.copy(remote_value))
except ftrack_api_old.exception.ImmutableAttributeError:
pass
value = self.get_local_value(entity)
# If the local value is still not set then attempt to set it with a
# suitable placeholder collection so that the caller can interact with
# the collection using its normal interface. This is required for a
# newly created entity for example. It *could* be done as a simple
# default value, but that would incur cost for every collection even
# when they are not modified before commit.
if value is ftrack_api_old.symbol.NOT_SET:
try:
with entity.session.operation_recording(False):
self.set_local_value(
entity,
# None should be treated as empty collection.
None
)
except ftrack_api_old.exception.ImmutableAttributeError:
pass
return self.get_local_value(entity)
def set_local_value(self, entity, value):
'''Set local *value* for *entity*.'''
if value is not ftrack_api_old.symbol.NOT_SET:
value = self._adapt_to_collection(entity, value)
value.mutable = self.mutable
super(AbstractCollectionAttribute, self).set_local_value(entity, value)
def set_remote_value(self, entity, value):
'''Set remote *value*.
.. note::
Only set locally stored remote value, do not persist to remote.
'''
if value is not ftrack_api_old.symbol.NOT_SET:
value = self._adapt_to_collection(entity, value)
value.mutable = False
super(AbstractCollectionAttribute, self).set_remote_value(entity, value)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to appropriate collection instance for *entity*.
.. note::
If *value* is None then return a suitable empty collection.
'''
raise NotImplementedError()
class CollectionAttribute(AbstractCollectionAttribute):
'''Represent a collection of other entities.'''
#: Collection class used by attribute.
collection_class = ftrack_api_old.collection.Collection
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to a Collection instance on *entity*.'''
if not isinstance(value, ftrack_api_old.collection.Collection):
if value is None:
value = ftrack_api_old.collection.Collection(entity, self)
elif isinstance(value, list):
value = ftrack_api_old.collection.Collection(
entity, self, data=value
)
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api_old.exception.AttributeError(
'Collection already bound to a different attribute'
)
return value
class KeyValueMappedCollectionAttribute(AbstractCollectionAttribute):
'''Represent a mapped key, value collection of entities.'''
#: Collection class used by attribute.
collection_class = ftrack_api_old.collection.KeyValueMappedCollectionProxy
def __init__(
self, name, creator, key_attribute, value_attribute, **kw
):
'''Initialise attribute with *name*.
*creator* should be a function that accepts a dictionary of data and
is used by the referenced collection to create new entities in the
collection.
*key_attribute* should be the name of the attribute on an entity in
the collection that represents the value for 'key' of the dictionary.
*value_attribute* should be the name of the attribute on an entity in
the collection that represents the value for 'value' of the dictionary.
'''
self.creator = creator
self.key_attribute = key_attribute
self.value_attribute = value_attribute
super(KeyValueMappedCollectionAttribute, self).__init__(name, **kw)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to an *entity*.'''
if not isinstance(
value, ftrack_api_old.collection.KeyValueMappedCollectionProxy
):
if value is None:
value = ftrack_api_old.collection.KeyValueMappedCollectionProxy(
ftrack_api_old.collection.Collection(entity, self),
self.creator, self.key_attribute,
self.value_attribute
)
elif isinstance(value, (list, ftrack_api_old.collection.Collection)):
if isinstance(value, list):
value = ftrack_api_old.collection.Collection(
entity, self, data=value
)
value = ftrack_api_old.collection.KeyValueMappedCollectionProxy(
value, self.creator, self.key_attribute,
self.value_attribute
)
elif isinstance(value, collections.Mapping):
# Convert mapping.
# TODO: When backend model improves, revisit this logic.
# First get existing value and delete all references. This is
# needed because otherwise they will not be automatically
# removed server side.
# The following should not cause recursion as the internal
# values should be mapped collections already.
current_value = self.get_value(entity)
if not isinstance(
current_value,
ftrack_api_old.collection.KeyValueMappedCollectionProxy
):
raise NotImplementedError(
'Cannot adapt mapping to collection as current value '
'type is not a KeyValueMappedCollectionProxy.'
)
# Create the new collection using the existing collection as
# basis. Then update through proxy interface to ensure all
# internal operations called consistently (such as entity
# deletion for key removal).
collection = ftrack_api_old.collection.Collection(
entity, self, data=current_value.collection[:]
)
collection_proxy = (
ftrack_api_old.collection.KeyValueMappedCollectionProxy(
collection, self.creator,
self.key_attribute, self.value_attribute
)
)
# Remove expired keys from collection.
expired_keys = set(current_value.keys()) - set(value.keys())
for key in expired_keys:
del collection_proxy[key]
# Set new values for existing keys / add new keys.
for key, value in value.items():
collection_proxy[key] = value
value = collection_proxy
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api_old.exception.AttributeError(
'Collection already bound to a different attribute.'
)
return value
class CustomAttributeCollectionAttribute(AbstractCollectionAttribute):
'''Represent a mapped custom attribute collection of entities.'''
#: Collection class used by attribute.
collection_class = (
ftrack_api_old.collection.CustomAttributeCollectionProxy
)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to an *entity*.'''
if not isinstance(
value, ftrack_api_old.collection.CustomAttributeCollectionProxy
):
if value is None:
value = ftrack_api_old.collection.CustomAttributeCollectionProxy(
ftrack_api_old.collection.Collection(entity, self)
)
elif isinstance(value, (list, ftrack_api_old.collection.Collection)):
# Why are we creating a new collection if it is a list? This will
# cause any merge to create a new proxy and collection.
if isinstance(value, list):
value = ftrack_api_old.collection.Collection(
entity, self, data=value
)
value = ftrack_api_old.collection.CustomAttributeCollectionProxy(
value
)
elif isinstance(value, collections.Mapping):
# Convert mapping.
# TODO: When backend model improves, revisit this logic.
# First get existing value and delete all references. This is
# needed because otherwise they will not be automatically
# removed server side.
# The following should not cause recursion as the internal
# values should be mapped collections already.
current_value = self.get_value(entity)
if not isinstance(
current_value,
ftrack_api_old.collection.CustomAttributeCollectionProxy
):
raise NotImplementedError(
'Cannot adapt mapping to collection as current value '
'type is not a MappedCollectionProxy.'
)
# Create the new collection using the existing collection as
# basis. Then update through proxy interface to ensure all
# internal operations called consistently (such as entity
# deletion for key removal).
collection = ftrack_api_old.collection.Collection(
entity, self, data=current_value.collection[:]
)
collection_proxy = (
ftrack_api_old.collection.CustomAttributeCollectionProxy(
collection
)
)
# Remove expired keys from collection.
expired_keys = set(current_value.keys()) - set(value.keys())
for key in expired_keys:
del collection_proxy[key]
# Set new values for existing keys / add new keys.
for key, value in value.items():
collection_proxy[key] = value
value = collection_proxy
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api_old.exception.AttributeError(
'Collection already bound to a different attribute.'
)
return value
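# Conceptual sketch, not part of the vendored module, of the local/remote
# value split described above. It assumes `entity` was fetched through a
# session and exposes a mutable scalar 'name' attribute.
def _example_attribute_access(entity):
    attribute = entity.attributes.get('name')

    remote = attribute.get_remote_value(entity)    # value as last fetched
    attribute.set_local_value(entity, 'new name')  # local, uncommitted change

    assert attribute.is_modified(entity)           # local now differs
    assert attribute.get_value(entity) == 'new name'
    return remote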

579
pype/vendor/ftrack_api_old/cache.py vendored Normal file
View file

@ -0,0 +1,579 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
'''Caching framework.
Defines a standardised :class:`Cache` interface for storing data against
specific keys. Key generation is also standardised using a :class:`KeyMaker`
interface.
Combining a Cache and KeyMaker allows for memoisation of function calls with
respect to the arguments used, by way of a :class:`Memoiser`.
As a convenience, a simple :func:`memoise` decorator is included for quick
memoisation of functions using a global cache and standard key maker.
'''
import collections
import functools
import abc
import copy
import inspect
import re
import anydbm
import contextlib
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
import ftrack_api_old.inspection
import ftrack_api_old.symbol
class Cache(object):
'''Cache interface.
Derive from this to define concrete cache implementations. A cache is
centered around the concept of key:value pairings where the key is unique
across the cache.
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
@abc.abstractmethod
def set(self, key, value):
'''Set *value* for *key*.'''
@abc.abstractmethod
def remove(self, key):
'''Remove *key* and return stored value.
Raise :exc:`KeyError` if *key* not found.
'''
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
raise NotImplementedError() # pragma: no cover
def values(self):
'''Return values for current keys.'''
values = []
for key in self.keys():
try:
value = self.get(key)
except KeyError:
continue
else:
values.append(value)
return values
def clear(self, pattern=None):
'''Remove all keys matching *pattern*.
*pattern* should be a regular expression string.
If *pattern* is None then all keys will be removed.
'''
if pattern is not None:
pattern = re.compile(pattern)
for key in self.keys():
if pattern is not None:
if not pattern.search(key):
continue
try:
self.remove(key)
except KeyError:
pass
class ProxyCache(Cache):
'''Proxy another cache.'''
def __init__(self, proxied):
'''Initialise cache with *proxied* cache instance.'''
self.proxied = proxied
super(ProxyCache, self).__init__()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
return self.proxied.get(key)
def set(self, key, value):
'''Set *value* for *key*.'''
return self.proxied.set(key, value)
def remove(self, key):
'''Remove *key* and return stored value.
Raise :exc:`KeyError` if *key* not found.
'''
return self.proxied.remove(key)
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
return self.proxied.keys()
class LayeredCache(Cache):
'''Layered cache.'''
def __init__(self, caches):
'''Initialise cache with *caches*.'''
super(LayeredCache, self).__init__()
self.caches = caches
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
Attempt to retrieve from cache layers in turn, starting with shallowest.
If value retrieved, then also set the value in each higher level cache
up from where retrieved.
'''
target_caches = []
value = ftrack_api_old.symbol.NOT_SET
for cache in self.caches:
try:
value = cache.get(key)
except KeyError:
target_caches.append(cache)
continue
else:
break
if value is ftrack_api_old.symbol.NOT_SET:
raise KeyError(key)
# Set value on all higher level caches.
for cache in target_caches:
cache.set(key, value)
return value
def set(self, key, value):
'''Set *value* for *key*.'''
for cache in self.caches:
cache.set(key, value)
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found in any layer.
'''
removed = False
for cache in self.caches:
try:
cache.remove(key)
except KeyError:
pass
else:
removed = True
if not removed:
raise KeyError(key)
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
keys = []
for cache in self.caches:
keys.extend(cache.keys())
return list(set(keys))
class MemoryCache(Cache):
'''Memory based cache.'''
def __init__(self):
'''Initialise cache.'''
self._cache = {}
super(MemoryCache, self).__init__()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
return self._cache[key]
def set(self, key, value):
'''Set *value* for *key*.'''
self._cache[key] = value
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found.
'''
del self._cache[key]
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
return self._cache.keys()
class FileCache(Cache):
'''File based cache that uses :mod:`anydbm` module.
.. note::
No locking of the underlying file is performed.
'''
def __init__(self, path):
'''Initialise cache at *path*.'''
self.path = path
# Initialise cache.
cache = anydbm.open(self.path, 'c')
cache.close()
super(FileCache, self).__init__()
@contextlib.contextmanager
def _database(self):
'''Yield opened database file.'''
cache = anydbm.open(self.path, 'w')
try:
yield cache
finally:
cache.close()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
with self._database() as cache:
return cache[key]
def set(self, key, value):
'''Set *value* for *key*.'''
with self._database() as cache:
cache[key] = value
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found.
'''
with self._database() as cache:
del cache[key]
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
with self._database() as cache:
return cache.keys()
class SerialisedCache(ProxyCache):
'''Proxied cache that stores values as serialised data.'''
def __init__(self, proxied, encode=None, decode=None):
'''Initialise cache with *encode* and *decode* callables.
*proxied* is the underlying cache to use for storage.
'''
self.encode = encode
self.decode = decode
super(SerialisedCache, self).__init__(proxied)
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
value = super(SerialisedCache, self).get(key)
if self.decode:
value = self.decode(value)
return value
def set(self, key, value):
'''Set *value* for *key*.'''
if self.encode:
value = self.encode(value)
super(SerialisedCache, self).set(key, value)
class KeyMaker(object):
'''Generate unique keys.'''
__metaclass__ = abc.ABCMeta
def __init__(self):
'''Initialise key maker.'''
super(KeyMaker, self).__init__()
self.item_separator = ''
def key(self, *items):
'''Return key for *items*.'''
keys = []
for item in items:
keys.append(self._key(item))
return self.item_separator.join(keys)
@abc.abstractmethod
def _key(self, obj):
'''Return key for *obj*.'''
class StringKeyMaker(KeyMaker):
'''Generate string key.'''
def _key(self, obj):
'''Return key for *obj*.'''
return str(obj)
class ObjectKeyMaker(KeyMaker):
'''Generate unique keys for objects.'''
def __init__(self):
'''Initialise key maker.'''
super(ObjectKeyMaker, self).__init__()
self.item_separator = '\0'
self.mapping_identifier = '\1'
self.mapping_pair_separator = '\2'
self.iterable_identifier = '\3'
self.name_identifier = '\4'
def _key(self, item):
'''Return key for *item*.
Returned key will be a pickle like string representing the *item*. This
allows for typically non-hashable objects to be used in key generation
(such as dictionaries).
If *item* is iterable then each item in it shall also be passed to this
method to ensure correct key generation.
Special markers are used to distinguish handling of specific cases in
order to ensure uniqueness of key corresponds directly to *item*.
Example::
>>> key_maker = ObjectKeyMaker()
>>> def add(x, y):
... "Return sum of *x* and *y*."
... return x + y
...
>>> key_maker.key(add, (1, 2))
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03'
>>> key_maker.key(add, (1, 3))
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x03.\x03'
'''
# TODO: Consider using a more robust and comprehensive solution such as
# dill (https://github.com/uqfoundation/dill).
if isinstance(item, collections.Iterable):
if isinstance(item, basestring):
return pickle.dumps(item, pickle.HIGHEST_PROTOCOL)
if isinstance(item, collections.Mapping):
contents = self.item_separator.join([
(
self._key(key) +
self.mapping_pair_separator +
self._key(value)
)
for key, value in sorted(item.items())
])
return (
self.mapping_identifier +
contents +
self.mapping_identifier
)
else:
contents = self.item_separator.join([
self._key(item) for item in item
])
return (
self.iterable_identifier +
contents +
self.iterable_identifier
)
elif inspect.ismethod(item):
return ''.join((
self.name_identifier,
item.__name__,
self.item_separator,
item.im_class.__name__,
self.item_separator,
item.__module__
))
elif inspect.isfunction(item) or inspect.isclass(item):
return ''.join((
self.name_identifier,
item.__name__,
self.item_separator,
item.__module__
))
elif inspect.isbuiltin(item):
return self.name_identifier + item.__name__
else:
return pickle.dumps(item, pickle.HIGHEST_PROTOCOL)
class Memoiser(object):
'''Memoise function calls using a :class:`KeyMaker` and :class:`Cache`.
Example::
>>> memoiser = Memoiser(MemoryCache(), ObjectKeyMaker())
>>> def add(x, y):
... "Return sum of *x* and *y*."
... print 'Called'
... return x + y
...
>>> memoiser.call(add, (1, 2), {})
Called
>>> memoiser.call(add, (1, 2), {})
>>> memoiser.call(add, (1, 3), {})
Called
'''
def __init__(self, cache=None, key_maker=None, return_copies=True):
'''Initialise with *cache* and *key_maker* to use.
If *cache* is not specified a default :class:`MemoryCache` will be
used. Similarly, if *key_maker* is not specified a default
:class:`ObjectKeyMaker` will be used.
If *return_copies* is True then all results returned from the cache will
be deep copies to avoid indirect mutation of cached values.
'''
self.cache = cache
if self.cache is None:
self.cache = MemoryCache()
self.key_maker = key_maker
if self.key_maker is None:
self.key_maker = ObjectKeyMaker()
self.return_copies = return_copies
super(Memoiser, self).__init__()
def call(self, function, args=None, kw=None):
'''Call *function* with *args* and *kw* and return result.
If *function* was previously called with exactly the same arguments
then return cached result if available.
Store result for call in cache.
'''
if args is None:
args = ()
if kw is None:
kw = {}
# Support arguments being passed as positionals or keywords.
arguments = inspect.getcallargs(function, *args, **kw)
key = self.key_maker.key(function, arguments)
try:
value = self.cache.get(key)
except KeyError:
value = function(*args, **kw)
self.cache.set(key, value)
# If requested, deep copy value to return in order to avoid cached value
# being inadvertently altered by the caller.
if self.return_copies:
value = copy.deepcopy(value)
return value
def memoise_decorator(memoiser):
'''Decorator to memoise function calls using *memoiser*.'''
def outer(function):
@functools.wraps(function)
def inner(*args, **kw):
return memoiser.call(function, args, kw)
return inner
return outer
#: Default memoiser.
memoiser = Memoiser()
#: Default memoise decorator using standard cache and key maker.
memoise = memoise_decorator(memoiser)
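A minimal usage sketch for the caching utilities above (illustrative only, not part of the vendored file; assumes the vendored package is importable as ftrack_api_old and that the cache path is writable):

    import ftrack_api_old.cache as fa_cache

    # Reads try the memory cache first, then fall back to the file cache and
    # backfill the memory cache on a hit.
    layered = fa_cache.LayeredCache(
        [fa_cache.MemoryCache(), fa_cache.FileCache('/tmp/example_cache.dbm')]
    )
    layered.set('key', 'value')
    print(layered.get('key'))

    # Memoise an expensive call with the module level decorator.
    @fa_cache.memoise
    def add(x, y):
        '''Return sum of *x* and *y*.'''
        return x + y

    print(add(1, 2))  # computed and stored
    print(add(1, 2))  # returned from the cache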

507
pype/vendor/ftrack_api_old/collection.py vendored Normal file
View file

@ -0,0 +1,507 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import logging
import collections
import copy
import ftrack_api_old.exception
import ftrack_api_old.inspection
import ftrack_api_old.symbol
import ftrack_api_old.operation
import ftrack_api_old.cache
from ftrack_api_old.logging import LazyLogMessage as L
class Collection(collections.MutableSequence):
'''A collection of entities.'''
def __init__(self, entity, attribute, mutable=True, data=None):
'''Initialise collection.'''
self.entity = entity
self.attribute = attribute
self._data = []
self._identities = set()
# Set initial dataset.
# Note: For initialisation, immutability is deferred till after initial
# population as otherwise there would be no public way to initialise an
# immutable collection. The reason self._data is not just set directly
# is to ensure other logic can be applied without special handling.
self.mutable = True
try:
if data is None:
data = []
with self.entity.session.operation_recording(False):
self.extend(data)
finally:
self.mutable = mutable
def _identity_key(self, entity):
'''Return identity key for *entity*.'''
return str(ftrack_api_old.inspection.identity(entity))
def __copy__(self):
'''Return shallow copy.
.. note::
To maintain expectations on usage, the shallow copy will include a
shallow copy of the underlying data store.
'''
cls = self.__class__
copied_instance = cls.__new__(cls)
copied_instance.__dict__.update(self.__dict__)
copied_instance._data = copy.copy(self._data)
copied_instance._identities = copy.copy(self._identities)
return copied_instance
def _notify(self, old_value):
'''Notify about modification.'''
# Record operation.
if self.entity.session.record_operations:
self.entity.session.recorded_operations.push(
ftrack_api_old.operation.UpdateEntityOperation(
self.entity.entity_type,
ftrack_api_old.inspection.primary_key(self.entity),
self.attribute.name,
old_value,
self
)
)
def insert(self, index, item):
'''Insert *item* at *index*.'''
if not self.mutable:
raise ftrack_api_old.exception.ImmutableCollectionError(self)
if item in self:
raise ftrack_api_old.exception.DuplicateItemInCollectionError(
item, self
)
old_value = copy.copy(self)
self._data.insert(index, item)
self._identities.add(self._identity_key(item))
self._notify(old_value)
def __contains__(self, value):
'''Return whether *value* present in collection.'''
return self._identity_key(value) in self._identities
def __getitem__(self, index):
'''Return item at *index*.'''
return self._data[index]
def __setitem__(self, index, item):
'''Set *item* against *index*.'''
if not self.mutable:
raise ftrack_api_old.exception.ImmutableCollectionError(self)
try:
existing_index = self.index(item)
except ValueError:
pass
else:
if index != existing_index:
raise ftrack_api_old.exception.DuplicateItemInCollectionError(
item, self
)
old_value = copy.copy(self)
try:
existing_item = self._data[index]
except IndexError:
pass
else:
self._identities.remove(self._identity_key(existing_item))
self._data[index] = item
self._identities.add(self._identity_key(item))
self._notify(old_value)
def __delitem__(self, index):
'''Remove item at *index*.'''
if not self.mutable:
raise ftrack_api_old.exception.ImmutableCollectionError(self)
old_value = copy.copy(self)
item = self._data[index]
del self._data[index]
self._identities.remove(self._identity_key(item))
self._notify(old_value)
def __len__(self):
'''Return count of items.'''
return len(self._data)
def __eq__(self, other):
'''Return whether this collection is equal to *other*.'''
if not isinstance(other, Collection):
return False
return sorted(self._identities) == sorted(other._identities)
def __ne__(self, other):
'''Return whether this collection is not equal to *other*.'''
return not self == other
class MappedCollectionProxy(collections.MutableMapping):
'''Common base class for mapped collection of entities.'''
def __init__(self, collection):
'''Initialise proxy for *collection*.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self.collection = collection
super(MappedCollectionProxy, self).__init__()
def __copy__(self):
'''Return shallow copy.
.. note::
To maintain expectations on usage, the shallow copy will include a
shallow copy of the underlying collection.
'''
cls = self.__class__
copied_instance = cls.__new__(cls)
copied_instance.__dict__.update(self.__dict__)
copied_instance.collection = copy.copy(self.collection)
return copied_instance
@property
def mutable(self):
'''Return whether collection is mutable.'''
return self.collection.mutable
@mutable.setter
def mutable(self, value):
'''Set whether collection is mutable to *value*.'''
self.collection.mutable = value
@property
def attribute(self):
'''Return attribute bound to.'''
return self.collection.attribute
@attribute.setter
def attribute(self, value):
'''Set bound attribute to *value*.'''
self.collection.attribute = value
class KeyValueMappedCollectionProxy(MappedCollectionProxy):
'''A mapped collection of key, value entities.
Proxy a standard :class:`Collection` as a mapping where certain attributes
from the entities in the collection are mapped to key, value pairs.
For example::
>>> collection = [Metadata(key='foo', value='bar'), ...]
>>> mapped = KeyValueMappedCollectionProxy(
... collection, create_metadata,
... key_attribute='key', value_attribute='value'
... )
>>> print mapped['foo']
'bar'
>>> mapped['bam'] = 'biz'
>>> print mapped.collection[-1]
Metadata(key='bam', value='biz')
'''
def __init__(
self, collection, creator, key_attribute, value_attribute
):
'''Initialise collection.'''
self.creator = creator
self.key_attribute = key_attribute
self.value_attribute = value_attribute
super(KeyValueMappedCollectionProxy, self).__init__(collection)
def _get_entity_by_key(self, key):
'''Return entity instance with matching *key* from collection.'''
for entity in self.collection:
if entity[self.key_attribute] == key:
return entity
raise KeyError(key)
def __getitem__(self, key):
'''Return value for *key*.'''
entity = self._get_entity_by_key(key)
return entity[self.value_attribute]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
try:
entity = self._get_entity_by_key(key)
except KeyError:
data = {
self.key_attribute: key,
self.value_attribute: value
}
entity = self.creator(self, data)
if (
ftrack_api_old.inspection.state(entity) is
ftrack_api_old.symbol.CREATED
):
# Persisting this entity will be handled here, record the
# operation.
self.collection.append(entity)
else:
# The entity is created and persisted separately by the
# creator. Do not record this operation.
with self.collection.entity.session.operation_recording(False):
# Do not record this operation since it will trigger
# redundant and potentially failing operations.
self.collection.append(entity)
else:
entity[self.value_attribute] = value
def __delitem__(self, key):
'''Remove and delete *key*.
.. note::
The associated entity will be deleted as well.
'''
for index, entity in enumerate(self.collection):
if entity[self.key_attribute] == key:
break
else:
raise KeyError(key)
del self.collection[index]
entity.session.delete(entity)
def __iter__(self):
'''Iterate over all keys.'''
keys = set()
for entity in self.collection:
keys.add(entity[self.key_attribute])
return iter(keys)
def __len__(self):
'''Return count of keys.'''
keys = set()
for entity in self.collection:
keys.add(entity[self.key_attribute])
return len(keys)
class PerSessionDefaultKeyMaker(ftrack_api_old.cache.KeyMaker):
'''Generate key for session.'''
def _key(self, obj):
'''Return key for *obj*.'''
if isinstance(obj, dict):
session = obj.get('session')
if session is not None:
# Key by session only.
return str(id(session))
return str(obj)
#: Memoiser for use with callables that should be called once per session.
memoise_session = ftrack_api_old.cache.memoise_decorator(
ftrack_api_old.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
@memoise_session
def _get_custom_attribute_configurations(session):
'''Return list of custom attribute configurations.
The configuration objects will have key, project_id, id and object_type_id
populated.
'''
return session.query(
'select key, project_id, id, object_type_id, entity_type from '
'CustomAttributeConfiguration'
).all()
class CustomAttributeCollectionProxy(MappedCollectionProxy):
'''A mapped collection of custom attribute value entities.'''
def __init__(
self, collection
):
'''Initialise collection.'''
self.key_attribute = 'configuration_id'
self.value_attribute = 'value'
super(CustomAttributeCollectionProxy, self).__init__(collection)
def _get_entity_configurations(self):
'''Return all configurations for current collection entity.'''
entity = self.collection.entity
entity_type = None
project_id = None
object_type_id = None
if 'object_type_id' in entity.keys():
project_id = entity['project_id']
entity_type = 'task'
object_type_id = entity['object_type_id']
if entity.entity_type == 'AssetVersion':
project_id = entity['asset']['parent']['project_id']
entity_type = 'assetversion'
if entity.entity_type == 'Asset':
project_id = entity['parent']['project_id']
entity_type = 'asset'
if entity.entity_type == 'Project':
project_id = entity['id']
entity_type = 'show'
if entity.entity_type == 'User':
entity_type = 'user'
if entity_type is None:
raise ValueError(
'Entity {!r} not supported.'.format(entity)
)
configurations = []
for configuration in _get_custom_attribute_configurations(
entity.session
):
if (
configuration['entity_type'] == entity_type and
configuration['project_id'] in (project_id, None) and
configuration['object_type_id'] == object_type_id
):
configurations.append(configuration)
# Return with global configurations at the end of the list. This is done
# so that global configurations are shadowed by project specific ones if the
# configurations list is looped when looking for a matching `key`.
return sorted(
configurations, key=lambda item: item['project_id'] is None
)
def _get_keys(self):
'''Return a list of all keys.'''
keys = []
for configuration in self._get_entity_configurations():
keys.append(configuration['key'])
return keys
def _get_entity_by_key(self, key):
'''Return entity instance with matching *key* from collection.'''
configuration_id = self.get_configuration_id_from_key(key)
for entity in self.collection:
if entity[self.key_attribute] == configuration_id:
return entity
return None
def get_configuration_id_from_key(self, key):
'''Return id of configuration with matching *key*.
Raise :exc:`KeyError` if no configuration with matching *key* found.
'''
for configuration in self._get_entity_configurations():
if key == configuration['key']:
return configuration['id']
raise KeyError(key)
def __getitem__(self, key):
'''Return value for *key*.'''
entity = self._get_entity_by_key(key)
if entity:
return entity[self.value_attribute]
for configuration in self._get_entity_configurations():
if configuration['key'] == key:
return configuration['default']
raise KeyError(key)
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
custom_attribute_value = self._get_entity_by_key(key)
if custom_attribute_value:
custom_attribute_value[self.value_attribute] = value
else:
entity = self.collection.entity
session = entity.session
data = {
self.key_attribute: self.get_configuration_id_from_key(key),
self.value_attribute: value,
'entity_id': entity['id']
}
# Make sure to use the currently active collection. This is
# necessary since a merge might have replaced the current one.
self.collection.entity['custom_attributes'].collection.append(
session.create('CustomAttributeValue', data)
)
def __delitem__(self, key):
'''Remove and delete *key*.
.. note::
The associated entity will be deleted as well.
'''
custom_attribute_value = self._get_entity_by_key(key)
if custom_attribute_value:
index = self.collection.index(custom_attribute_value)
del self.collection[index]
custom_attribute_value.session.delete(custom_attribute_value)
else:
self.logger.warning(L(
'Cannot delete {0!r} on {1!r}, no custom attribute value set.',
key, self.collection.entity
))
def __eq__(self, collection):
'''Return True if *collection* equals proxy collection.'''
if collection is ftrack_api_old.symbol.NOT_SET:
return False
return collection.collection == self.collection
def __iter__(self):
'''Iterate over all keys.'''
keys = self._get_keys()
return iter(keys)
def __len__(self):
'''Return count of keys.'''
keys = self._get_keys()
return len(keys)
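An illustrative sketch of how the mapped collection proxies above are typically used through an entity's 'metadata' and 'custom_attributes' attributes (assumes *session* is a connected ftrack_api_old session; the key 'fstart' stands in for a configured custom attribute):

    task = session.query('Task').first()

    # KeyValueMappedCollectionProxy: metadata behaves like a mapping and
    # creates Metadata entities on assignment.
    task['metadata']['review'] = 'pending'
    print(list(task['metadata'].keys()))

    # CustomAttributeCollectionProxy: reads fall back to the configured
    # default until a CustomAttributeValue entity exists.
    print(task['custom_attributes']['fstart'])
    task['custom_attributes']['fstart'] = 1001

    session.commit()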

119
pype/vendor/ftrack_api_old/data.py vendored Normal file
View file

@ -0,0 +1,119 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
import os
from abc import ABCMeta, abstractmethod
import tempfile
class Data(object):
'''File-like object for manipulating data.'''
__metaclass__ = ABCMeta
def __init__(self):
'''Initialise data access.'''
self.closed = False
@abstractmethod
def read(self, limit=None):
'''Return content from current position up to *limit*.'''
@abstractmethod
def write(self, content):
'''Write content at current position.'''
def flush(self):
'''Flush buffers ensuring data written.'''
def seek(self, offset, whence=os.SEEK_SET):
'''Move internal pointer by *offset*.
The *whence* argument is optional and defaults to os.SEEK_SET or 0
(absolute file positioning); other values are os.SEEK_CUR or 1
(seek relative to the current position) and os.SEEK_END or 2
(seek relative to the file's end).
'''
raise NotImplementedError('Seek not supported.')
def tell(self):
'''Return current position of internal pointer.'''
raise NotImplementedError('Tell not supported.')
def close(self):
'''Flush buffers and prevent further access.'''
self.flush()
self.closed = True
class FileWrapper(Data):
'''Data wrapper for Python file objects.'''
def __init__(self, wrapped_file):
'''Initialise access to *wrapped_file*.'''
self.wrapped_file = wrapped_file
self._read_since_last_write = False
super(FileWrapper, self).__init__()
def read(self, limit=None):
'''Return content from current position up to *limit*.'''
self._read_since_last_write = True
if limit is None:
limit = -1
return self.wrapped_file.read(limit)
def write(self, content):
'''Write content at current position.'''
if self._read_since_last_write:
# Windows requires a seek before switching from read to write.
self.seek(self.tell())
self.wrapped_file.write(content)
self._read_since_last_write = False
def flush(self):
'''Flush buffers ensuring data written.'''
super(FileWrapper, self).flush()
if hasattr(self.wrapped_file, 'flush'):
self.wrapped_file.flush()
def seek(self, offset, whence=os.SEEK_SET):
'''Move internal pointer by *offset*.'''
self.wrapped_file.seek(offset, whence)
def tell(self):
'''Return current position of internal pointer.'''
return self.wrapped_file.tell()
def close(self):
'''Flush buffers and prevent further access.'''
if not self.closed:
super(FileWrapper, self).close()
if hasattr(self.wrapped_file, 'close'):
self.wrapped_file.close()
class File(FileWrapper):
'''Data wrapper accepting filepath.'''
def __init__(self, path, mode='rb'):
'''Open file at *path* with *mode*.'''
file_object = open(path, mode)
super(File, self).__init__(file_object)
class String(FileWrapper):
'''Data wrapper using TemporaryFile instance.'''
def __init__(self, content=None):
'''Initialise data with *content*.'''
super(String, self).__init__(
tempfile.TemporaryFile()
)
if content is not None:
self.wrapped_file.write(content)
self.wrapped_file.seek(0)
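A small usage sketch for the Data wrappers above (illustrative only; assumes the module is importable as ftrack_api_old.data). String buffers its content in a temporary file and exposes the same read/write/seek interface:

    from ftrack_api_old.data import String

    data = String('hello world')
    print(data.read())   # 'hello world'
    data.seek(0)
    data.write('HELLO')  # overwrite the first five bytes
    data.seek(0)
    print(data.read())   # 'HELLO world'
    data.close()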

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,91 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api_old.entity.base
class AssetVersion(ftrack_api_old.entity.base.Entity):
'''Represent asset version.'''
def create_component(
self, path, data=None, location=None
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`). This version is
automatically set as the component's version.
If *location* is specified then automatically add component to that
location.
'''
if data is None:
data = {}
data.pop('version_id', None)
data['version'] = self
return self.session.create_component(path, data=data, location=location)
def encode_media(self, media, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type, and the job data will
contain information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible, and will be
set as the version's thumbnail.
The new components will automatically be associated with the version.
A server version of 3.3.32 or higher is required for this to function
properly.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
return self.session.encode_media(
media, version_id=self['id'], keep_original=keep_original
)
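An illustrative sketch of the helper methods above (assumes *session* is a connected ftrack_api_old session; the id and paths are placeholders):

    version = session.get('AssetVersion', 'SOME-VERSION-ID')

    # Register a file sequence as a component of this version.
    component = version.create_component(
        '/path/to/render.%04d.exr [1-24]', data={'name': 'main'}
    )
    session.commit()

    # Ask the server to encode a web reviewable; returns a Job entity.
    job = version.encode_media('/path/to/preview.mov')
    print(job['status'])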

View file

@ -0,0 +1,402 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import abc
import collections
import logging
import ftrack_api_old.symbol
import ftrack_api_old.attribute
import ftrack_api_old.inspection
import ftrack_api_old.exception
import ftrack_api_old.operation
from ftrack_api_old.logging import LazyLogMessage as L
class DynamicEntityTypeMetaclass(abc.ABCMeta):
'''Custom metaclass to customise representation of dynamic classes.
.. note::
Derive from same metaclass as derived bases to avoid conflicts.
'''
def __repr__(self):
'''Return representation of class.'''
return '<dynamic ftrack class \'{0}\'>'.format(self.__name__)
class Entity(collections.MutableMapping):
'''Base class for all entities.'''
__metaclass__ = DynamicEntityTypeMetaclass
entity_type = 'Entity'
attributes = None
primary_key_attributes = None
default_projections = None
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api_old.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
super(Entity, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self.session = session
self._inflated = set()
if data is None:
data = {}
self.logger.debug(L(
'{0} entity from {1!r}.',
('Reconstructing' if reconstructing else 'Constructing'), data
))
self._ignore_data_keys = ['__entity_type__']
if not reconstructing:
self._construct(data)
else:
self._reconstruct(data)
def _construct(self, data):
'''Construct from *data*.'''
# Suspend operation recording so that all modifications can be applied
# in single create operation. In addition, recording a modification
# operation requires a primary key which may not be available yet.
relational_attributes = dict()
with self.session.operation_recording(False):
# Set defaults for any unset local attributes.
for attribute in self.__class__.attributes:
if attribute.name not in data:
default_value = attribute.default_value
if callable(default_value):
default_value = default_value(self)
attribute.set_local_value(self, default_value)
# Data represents locally set values.
for key, value in data.items():
if key in self._ignore_data_keys:
continue
attribute = self.__class__.attributes.get(key)
if attribute is None:
self.logger.debug(L(
'Cannot populate {0!r} attribute as no such '
'attribute found on entity {1!r}.', key, self
))
continue
if not isinstance(attribute, ftrack_api_old.attribute.ScalarAttribute):
relational_attributes.setdefault(
attribute, value
)
else:
attribute.set_local_value(self, value)
# Record create operation.
# Note: As this operation is recorded *before* any Session.merge takes
# place there is the possibility that the operation will hold references
# to outdated data in entity_data. However, this would be unusual in
# that it would mean the same new entity was created twice and only one
# altered. Conversely, if this operation were recorded *after*
# Session.merge took place, any cache would not be able to determine
# the status of the entity, which could be important if the cache should
# not store newly created entities that have not yet been persisted. Out
# of these two 'evils' this approach is deemed the lesser at this time.
# A third, more involved, approach to satisfy both might be to record
# the operation with a PENDING entity_data value and then update with
# merged values post merge.
if self.session.record_operations:
entity_data = {}
# Lower level API used here to avoid including any empty
# collections that are automatically generated on access.
for attribute in self.attributes:
value = attribute.get_local_value(self)
if value is not ftrack_api_old.symbol.NOT_SET:
entity_data[attribute.name] = value
self.session.recorded_operations.push(
ftrack_api_old.operation.CreateEntityOperation(
self.entity_type,
ftrack_api_old.inspection.primary_key(self),
entity_data
)
)
for attribute, value in relational_attributes.items():
# Finally we set values for "relational" attributes, we need
# to do this at the end in order to get the create operations
# in the correct order as the newly created attributes might
# contain references to the newly created entity.
attribute.set_local_value(
self, value
)
def _reconstruct(self, data):
'''Reconstruct from *data*.'''
# Data represents remote values.
for key, value in data.items():
if key in self._ignore_data_keys:
continue
attribute = self.__class__.attributes.get(key)
if attribute is None:
self.logger.debug(L(
'Cannot populate {0!r} attribute as no such attribute '
'found on entity {1!r}.', key, self
))
continue
attribute.set_remote_value(self, value)
def __repr__(self):
'''Return representation of instance.'''
return '<dynamic ftrack {0} object {1}>'.format(
self.__class__.__name__, id(self)
)
def __str__(self):
'''Return string representation of instance.'''
with self.session.auto_populating(False):
primary_key = ['Unknown']
try:
primary_key = ftrack_api_old.inspection.primary_key(self).values()
except KeyError:
pass
return '<{0}({1})>'.format(
self.__class__.__name__, ', '.join(primary_key)
)
def __hash__(self):
'''Return hash representing instance.'''
return hash(str(ftrack_api_old.inspection.identity(self)))
def __eq__(self, other):
'''Return whether *other* is equal to this instance.
.. note::
Equality is determined by both instances having the same identity.
Values of attributes are not considered.
'''
try:
return (
ftrack_api_old.inspection.identity(other)
== ftrack_api_old.inspection.identity(self)
)
except (AttributeError, KeyError):
return False
def __getitem__(self, key):
'''Return attribute value for *key*.'''
attribute = self.__class__.attributes.get(key)
if attribute is None:
raise KeyError(key)
return attribute.get_value(self)
def __setitem__(self, key, value):
'''Set attribute *value* for *key*.'''
attribute = self.__class__.attributes.get(key)
if attribute is None:
raise KeyError(key)
attribute.set_local_value(self, value)
def __delitem__(self, key):
'''Clear attribute value for *key*.
.. note::
Will not remove the attribute, but instead clear any local value
and revert to the last known server value.
'''
attribute = self.__class__.attributes.get(key)
attribute.set_local_value(self, ftrack_api_old.symbol.NOT_SET)
def __iter__(self):
'''Iterate over all attributes keys.'''
for attribute in self.__class__.attributes:
yield attribute.name
def __len__(self):
'''Return count of attributes.'''
return len(self.__class__.attributes)
def values(self):
'''Return list of values.'''
if self.session.auto_populate:
self._populate_unset_scalar_attributes()
return super(Entity, self).values()
def items(self):
'''Return list of tuples of (key, value) pairs.
.. note::
Will fetch all values from the server if not already fetched or set
locally.
'''
if self.session.auto_populate:
self._populate_unset_scalar_attributes()
return super(Entity, self).items()
def clear(self):
'''Reset all locally modified attribute values.'''
for attribute in self:
del self[attribute]
def merge(self, entity, merged=None):
'''Merge *entity* attribute values and other data into this entity.
Only merge values from *entity* that are not
:attr:`ftrack_api_old.symbol.NOT_SET`.
Return a list of changes made with each change being a mapping with
the keys:
* type - Either 'remote_attribute', 'local_attribute' or 'property'.
* name - The name of the attribute / property modified.
* old_value - The previous value.
* new_value - The new merged value.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
log_message = 'Merged {type} "{name}": {old_value!r} -> {new_value!r}'
changes = []
# Attributes.
# Prioritise by type so that scalar values are set first. This should
# guarantee that the attributes making up the identity of the entity
# are merged before merging any collections that may have references to
# this entity.
attributes = collections.deque()
for attribute in entity.attributes:
if isinstance(attribute, ftrack_api_old.attribute.ScalarAttribute):
attributes.appendleft(attribute)
else:
attributes.append(attribute)
for other_attribute in attributes:
attribute = self.attributes.get(other_attribute.name)
# Local attributes.
other_local_value = other_attribute.get_local_value(entity)
if other_local_value is not ftrack_api_old.symbol.NOT_SET:
local_value = attribute.get_local_value(self)
if local_value != other_local_value:
merged_local_value = self.session.merge(
other_local_value, merged=merged
)
attribute.set_local_value(self, merged_local_value)
changes.append({
'type': 'local_attribute',
'name': attribute.name,
'old_value': local_value,
'new_value': merged_local_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
# Remote attributes.
other_remote_value = other_attribute.get_remote_value(entity)
if other_remote_value is not ftrack_api_old.symbol.NOT_SET:
remote_value = attribute.get_remote_value(self)
if remote_value != other_remote_value:
merged_remote_value = self.session.merge(
other_remote_value, merged=merged
)
attribute.set_remote_value(
self, merged_remote_value
)
changes.append({
'type': 'remote_attribute',
'name': attribute.name,
'old_value': remote_value,
'new_value': merged_remote_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
# We need to handle collections separately since
# they may store a local copy of the remote attribute
# even though it may not be modified.
if not isinstance(
attribute, ftrack_api_old.attribute.AbstractCollectionAttribute
):
continue
local_value = attribute.get_local_value(
self
)
# Populated but not modified, update it.
if (
local_value is not ftrack_api_old.symbol.NOT_SET and
local_value == remote_value
):
attribute.set_local_value(
self, merged_remote_value
)
changes.append({
'type': 'local_attribute',
'name': attribute.name,
'old_value': local_value,
'new_value': merged_remote_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
return changes
def _populate_unset_scalar_attributes(self):
'''Populate all unset scalar attributes in one query.'''
projections = []
for attribute in self.attributes:
if isinstance(attribute, ftrack_api_old.attribute.ScalarAttribute):
if attribute.get_remote_value(self) is ftrack_api_old.symbol.NOT_SET:
projections.append(attribute.name)
if projections:
self.session.populate([self], ', '.join(projections))
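A short sketch of the mapping behaviour the Entity base class provides (assumes *session* is a connected ftrack_api_old session; illustrative only):

    task = session.query('Task').first()

    task['name'] = 'compositing'   # set a local attribute value
    print(task['name'])
    del task['name']               # clear the local value, revert to the remote one
    print(list(task.keys()))       # attribute names defined by the entity schema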

View file

@ -0,0 +1,74 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api_old.entity.base
class Component(ftrack_api_old.entity.base.Entity):
'''Represent a component.'''
def get_availability(self, locations=None):
'''Return availability in *locations*.
If *locations* is None, all known locations will be checked.
Return a dictionary of {location_id:percentage_availability}
'''
return self.session.get_component_availability(
self, locations=locations
)
class CreateThumbnailMixin(object):
'''Mixin to add create_thumbnail method on entity class.'''
def create_thumbnail(self, path, data=None):
'''Set entity thumbnail from *path*.
Creates a thumbnail component in the ftrack.server location using
:meth:`Session.create_component
<ftrack_api_old.session.Session.create_component>`. The thumbnail component
will be created using *data* if specified. If no component name is
given, `thumbnail` will be used.
The file is expected to be of an appropriate size and valid file
type.
.. note::
A :meth:`Session.commit<ftrack_api_old.session.Session.commit>` will be
automatically issued.
'''
if data is None:
data = {}
if not data.get('name'):
data['name'] = 'thumbnail'
thumbnail_component = self.session.create_component(
path, data, location=None
)
origin_location = self.session.get(
'Location', ftrack_api_old.symbol.ORIGIN_LOCATION_ID
)
server_location = self.session.get(
'Location', ftrack_api_old.symbol.SERVER_LOCATION_ID
)
server_location.add_component(thumbnail_component, [origin_location])
# TODO: This commit can be avoided by reordering the operations in
# this method so that the component is transferred to ftrack.server
# after the thumbnail has been set.
#
# There is currently a bug in the API backend, causing the operations
# to *some* times be ordered wrongly, where the update occurs before
# the component has been created, causing an integrity error.
#
# Once this issue has been resolved, this commit can be removed and
# the update placed between component creation and registration.
self['thumbnail_id'] = thumbnail_component['id']
self.session.commit()
return thumbnail_component
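An illustrative sketch of the thumbnail mixin above (assumes *session* is a connected ftrack_api_old session and the entity exists; id and path are placeholders):

    task = session.get('Task', 'SOME-TASK-ID')

    # Uploads the file to the ftrack.server location, links it as the
    # entity's thumbnail and issues a commit automatically.
    thumbnail_component = task.create_thumbnail('/path/to/thumbnail.jpg')
    print(thumbnail_component['id'])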

View file

@ -0,0 +1,431 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import logging
import uuid
import functools
import ftrack_api_old.attribute
import ftrack_api_old.entity.base
import ftrack_api_old.entity.location
import ftrack_api_old.entity.component
import ftrack_api_old.entity.asset_version
import ftrack_api_old.entity.project_schema
import ftrack_api_old.entity.note
import ftrack_api_old.entity.job
import ftrack_api_old.entity.user
import ftrack_api_old.symbol
import ftrack_api_old.cache
from ftrack_api_old.logging import LazyLogMessage as L
class Factory(object):
'''Entity class factory.'''
def __init__(self):
'''Initialise factory.'''
super(Factory, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.
*bases* should be a list of bases to give the constructed class. If not
specified, default to :class:`ftrack_api_old.entity.base.Entity`.
'''
entity_type = schema['id']
class_name = entity_type
class_bases = bases
if class_bases is None:
class_bases = [ftrack_api_old.entity.base.Entity]
class_namespace = dict()
# Build attributes for class.
attributes = ftrack_api_old.attribute.Attributes()
immutable = schema.get('immutable', [])
for name, fragment in schema.get('properties', {}).items():
mutable = name not in immutable
default = fragment.get('default', ftrack_api_old.symbol.NOT_SET)
if default == '{uid}':
default = lambda instance: str(uuid.uuid4())
data_type = fragment.get('type', ftrack_api_old.symbol.NOT_SET)
if data_type is not ftrack_api_old.symbol.NOT_SET:
if data_type in (
'string', 'boolean', 'integer', 'number', 'variable'
):
# Basic scalar attribute.
if data_type == 'number':
data_type = 'float'
if data_type == 'string':
data_format = fragment.get('format')
if data_format == 'date-time':
data_type = 'datetime'
attribute = self.create_scalar_attribute(
class_name, name, mutable, default, data_type
)
if attribute:
attributes.add(attribute)
elif data_type == 'array':
attribute = self.create_collection_attribute(
class_name, name, mutable
)
if attribute:
attributes.add(attribute)
elif data_type == 'mapped_array':
reference = fragment.get('items', {}).get('$ref')
if not reference:
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that does '
'not define a schema reference.', class_name, name
))
continue
attribute = self.create_mapped_collection_attribute(
class_name, name, mutable, reference
)
if attribute:
attributes.add(attribute)
else:
self.logger.debug(L(
'Skipping {0}.{1} attribute with unrecognised data '
'type {2}', class_name, name, data_type
))
else:
# Reference attribute.
reference = fragment.get('$ref', ftrack_api_old.symbol.NOT_SET)
if reference is ftrack_api_old.symbol.NOT_SET:
self.logger.debug(L(
'Skipping {0}.{1} reference attribute that does '
'not define a schema reference.', class_name, name
))
continue
attribute = self.create_reference_attribute(
class_name, name, mutable, reference
)
if attribute:
attributes.add(attribute)
default_projections = schema.get('default_projections', [])
# Construct class.
class_namespace['entity_type'] = entity_type
class_namespace['attributes'] = attributes
class_namespace['primary_key_attributes'] = schema['primary_key'][:]
class_namespace['default_projections'] = default_projections
cls = type(
str(class_name), # type doesn't accept unicode.
tuple(class_bases),
class_namespace
)
return cls
def create_scalar_attribute(
self, class_name, name, mutable, default, data_type
):
'''Return appropriate scalar attribute instance.'''
return ftrack_api_old.attribute.ScalarAttribute(
name, data_type=data_type, default_value=default, mutable=mutable
)
def create_reference_attribute(self, class_name, name, mutable, reference):
'''Return appropriate reference attribute instance.'''
return ftrack_api_old.attribute.ReferenceAttribute(
name, reference, mutable=mutable
)
def create_collection_attribute(self, class_name, name, mutable):
'''Return appropriate collection attribute instance.'''
return ftrack_api_old.attribute.CollectionAttribute(
name, mutable=mutable
)
def create_mapped_collection_attribute(
self, class_name, name, mutable, reference
):
'''Return appropriate mapped collection attribute instance.'''
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that has '
'no implementation defined for reference {2}.',
class_name, name, reference
))
class PerSessionDefaultKeyMaker(ftrack_api_old.cache.KeyMaker):
'''Generate key for defaults.'''
def _key(self, obj):
'''Return key for *obj*.'''
if isinstance(obj, dict):
entity = obj.get('entity')
if entity is not None:
# Key by session only.
return str(id(entity.session))
return str(obj)
#: Memoiser for use with default callables that should only be called once per
# session.
memoise_defaults = ftrack_api_old.cache.memoise_decorator(
ftrack_api_old.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
#: Memoiser for use with callables that should be called once per session.
memoise_session = ftrack_api_old.cache.memoise_decorator(
ftrack_api_old.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
@memoise_session
def _get_custom_attribute_configurations(session):
'''Return list of custom attribute configurations.
The configuration objects will have key, project_id, id and object_type_id
populated.
'''
return session.query(
'select key, project_id, id, object_type_id, entity_type, '
'is_hierarchical from CustomAttributeConfiguration'
).all()
def _get_entity_configurations(entity):
'''Return all configurations for current collection entity.'''
entity_type = None
project_id = None
object_type_id = None
if 'object_type_id' in entity.keys():
project_id = entity['project_id']
entity_type = 'task'
object_type_id = entity['object_type_id']
if entity.entity_type == 'AssetVersion':
project_id = entity['asset']['parent']['project_id']
entity_type = 'assetversion'
if entity.entity_type == 'Project':
project_id = entity['id']
entity_type = 'show'
if entity.entity_type == 'User':
entity_type = 'user'
if entity.entity_type == 'Asset':
entity_type = 'asset'
if entity.entity_type in ('TypedContextList', 'AssetVersionList'):
entity_type = 'list'
if entity_type is None:
raise ValueError(
'Entity {!r} not supported.'.format(entity)
)
configurations = []
for configuration in _get_custom_attribute_configurations(
entity.session
):
if (
configuration['entity_type'] == entity_type and
configuration['project_id'] in (project_id, None) and
configuration['object_type_id'] == object_type_id
):
# The custom attribute configuration is for the target entity type.
configurations.append(configuration)
elif (
entity_type in ('asset', 'assetversion', 'show', 'task') and
configuration['project_id'] in (project_id, None) and
configuration['is_hierarchical']
):
# The target entity type allows hierarchical attributes.
configurations.append(configuration)
# Return with global configurations at the end of the list. This is done
# so that global configurations are shadowed by project specific ones if the
# configurations list is looped when looking for a matching `key`.
return sorted(
configurations, key=lambda item: item['project_id'] is None
)
class StandardFactory(Factory):
'''Standard entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
if not bases:
bases = []
extra_bases = []
# Customise classes.
if schema['id'] == 'ProjectSchema':
extra_bases = [ftrack_api_old.entity.project_schema.ProjectSchema]
elif schema['id'] == 'Location':
extra_bases = [ftrack_api_old.entity.location.Location]
elif schema['id'] == 'AssetVersion':
extra_bases = [ftrack_api_old.entity.asset_version.AssetVersion]
elif schema['id'].endswith('Component'):
extra_bases = [ftrack_api_old.entity.component.Component]
elif schema['id'] == 'Note':
extra_bases = [ftrack_api_old.entity.note.Note]
elif schema['id'] == 'Job':
extra_bases = [ftrack_api_old.entity.job.Job]
elif schema['id'] == 'User':
extra_bases = [ftrack_api_old.entity.user.User]
bases = extra_bases + bases
# If bases does not contain any items, add the base entity class.
if not bases:
bases = [ftrack_api_old.entity.base.Entity]
# Add mixins.
if 'notes' in schema.get('properties', {}):
bases.append(
ftrack_api_old.entity.note.CreateNoteMixin
)
if 'thumbnail_id' in schema.get('properties', {}):
bases.append(
ftrack_api_old.entity.component.CreateThumbnailMixin
)
cls = super(StandardFactory, self).create(schema, bases=bases)
return cls
def create_mapped_collection_attribute(
self, class_name, name, mutable, reference
):
'''Return appropriate mapped collection attribute instance.'''
if reference == 'Metadata':
def create_metadata(proxy, data, reference):
'''Return metadata for *data*.'''
entity = proxy.collection.entity
session = entity.session
data.update({
'parent_id': entity['id'],
'parent_type': entity.entity_type
})
return session.create(reference, data)
creator = functools.partial(
create_metadata, reference=reference
)
key_attribute = 'key'
value_attribute = 'value'
return ftrack_api_old.attribute.KeyValueMappedCollectionAttribute(
name, creator, key_attribute, value_attribute, mutable=mutable
)
elif reference == 'CustomAttributeValue':
return (
ftrack_api_old.attribute.CustomAttributeCollectionAttribute(
name, mutable=mutable
)
)
elif reference.endswith('CustomAttributeValue'):
def creator(proxy, data):
'''Create a custom attribute based on *proxy* and *data*.
Raise :py:exc:`KeyError` if the related entity is already persisted
to the server. The proxy represents dense custom attribute
values and should never create new custom attribute values
through the proxy if the entity exists on the remote.
If the entity is not persisted, the usual
<entity_type>CustomAttributeValue items cannot be updated as
the related entity does not exist on the remote and the values are
not in the proxy. Instead a <entity_type>CustomAttributeValue will
be reconstructed and an update operation will be recorded.
'''
entity = proxy.collection.entity
if (
ftrack_api_old.inspection.state(entity) is not
ftrack_api_old.symbol.CREATED
):
raise KeyError(
'Custom attributes must be created explicitly for the '
'given entity type before being set.'
)
configuration = None
for candidate in _get_entity_configurations(entity):
if candidate['key'] == data['key']:
configuration = candidate
break
if configuration is None:
raise ValueError(
u'No valid custom attribute for data {0!r} was found.'
.format(data)
)
create_data = dict(data.items())
create_data['configuration_id'] = configuration['id']
create_data['entity_id'] = entity['id']
session = entity.session
# Create custom attribute by reconstructing it and update the
# value. This will prevent a create operation to be sent to the
# remote, as create operations for this entity type is not
# allowed. Instead an update operation will be recorded.
value = create_data.pop('value')
item = session.create(
reference,
create_data,
reconstructing=True
)
# Record update operation.
item['value'] = value
return item
key_attribute = 'key'
value_attribute = 'value'
return ftrack_api_old.attribute.KeyValueMappedCollectionAttribute(
name, creator, key_attribute, value_attribute, mutable=mutable
)
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that has no configuration '
'for reference {2}.', class_name, name, reference
))
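A sketch of how the factory above turns a schema into a dynamic entity class (the schema below is simplified and illustrative, not a real server schema):

    factory = StandardFactory()
    schema = {
        'id': 'Example',
        'primary_key': ['id'],
        'immutable': ['id'],
        'properties': {
            'id': {'type': 'string', 'default': '{uid}'},
            'name': {'type': 'string'},
        },
    }
    ExampleClass = factory.create(schema)
    print(ExampleClass.entity_type)  # 'Example'
    print([attribute.name for attribute in ExampleClass.attributes])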

View file

@ -0,0 +1,48 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api_old.entity.base
class Job(ftrack_api_old.entity.base.Entity):
'''Represent job.'''
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api_old.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
To set a job `description` visible in the web interface, *data* can
contain a key called `data` which should be a JSON serialised
dictionary containing description::
data = {
'status': 'running',
'data': json.dumps(dict(description='My job description.')),
...
}
Will raise a :py:exc:`ValueError` if *data* contains `type` and `type`
is set to something not equal to "api_job".
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
if not reconstructing:
if data.get('type') not in ('api_job', None):
raise ValueError(
'Invalid job type "{0}". Must be "api_job"'.format(
data.get('type')
)
)
super(Job, self).__init__(
session, data=data, reconstructing=reconstructing
)
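An illustrative sketch of creating an api job with a description, as outlined in the constructor docstring above (assumes *session* is a connected ftrack_api_old session; values are placeholders):

    import json

    job = session.create('Job', {
        'user': session.query('User').first(),
        'status': 'running',
        'data': json.dumps({'description': 'My job description.'}),
    })
    session.commit()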

View file

@ -0,0 +1,732 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import collections
import functools
import ftrack_api_old.entity.base
import ftrack_api_old.exception
import ftrack_api_old.event.base
import ftrack_api_old.symbol
import ftrack_api_old.inspection
from ftrack_api_old.logging import LazyLogMessage as L
class Location(ftrack_api_old.entity.base.Entity):
'''Represent storage for components.'''
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api_old.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
self.accessor = ftrack_api_old.symbol.NOT_SET
self.structure = ftrack_api_old.symbol.NOT_SET
self.resource_identifier_transformer = ftrack_api_old.symbol.NOT_SET
self.priority = 95
super(Location, self).__init__(
session, data=data, reconstructing=reconstructing
)
def __str__(self):
'''Return string representation of instance.'''
representation = super(Location, self).__str__()
with self.session.auto_populating(False):
name = self['name']
if name is not ftrack_api_old.symbol.NOT_SET:
representation = representation.replace(
'(', '("{0}", '.format(name)
)
return representation
def add_component(self, component, source, recursive=True):
'''Add *component* to location.
*component* should be a single component instance.
*source* should be an instance of another location that acts as the
source.
Raise :exc:`ftrack_api_old.ComponentInLocationError` if the *component*
already exists in this location.
Raise :exc:`ftrack_api_old.LocationError` if managing data and the generated
target structure for the component already exists according to the
accessor. This helps prevent potential data loss by avoiding overwriting
existing data. Note that there is a race condition between the check and
the write so if another process creates data at the same target during
that period it will be overwritten.
.. note::
A :meth:`Session.commit<ftrack_api_old.session.Session.commit>` may be
automatically issued as part of the component registration.
'''
return self.add_components(
[component], sources=source, recursive=recursive
)
def add_components(self, components, sources, recursive=True, _depth=0):
'''Add *components* to location.
*components* should be a list of component instances.
*sources* may be either a single source or a list of sources. If a list
then each corresponding index in *sources* will be used for each
*component*. A source should be an instance of another location.
Raise :exc:`ftrack_api_old.exception.ComponentInLocationError` if any
component in *components* already exists in this location. In this case,
no changes will be made and no data transferred.
Raise :exc:`ftrack_api_old.exception.LocationError` if managing data and the
generated target structure for the component already exists according to
the accessor. This helps prevent potential data loss by avoiding
overwriting existing data. Note that there is a race condition between
the check and the write so if another process creates data at the same
target during that period it will be overwritten.
.. note::
A :meth:`Session.commit<ftrack_api_old.session.Session.commit>` may be
automatically issued as part of the components registration.
.. important::
If this location manages data then the *components* data is first
transferred to the target prescribed by the structure plugin, using
the configured accessor. If any component fails to transfer then
:exc:`ftrack_api_old.exception.LocationError` is raised and none of the
components are registered with the database. In this case it is left
up to the caller to decide and act on manually cleaning up any
transferred data using the 'transferred' detail in the raised error.
Likewise, after transfer, all components are registered with the
database in a batch call. If any component causes an error then all
components will remain unregistered and
:exc:`ftrack_api_old.exception.LocationError` will be raised detailing
issues and any transferred data under the 'transferred' detail key.
'''
if (
isinstance(sources, basestring)
or not isinstance(sources, collections.Sequence)
):
sources = [sources]
sources_count = len(sources)
if sources_count not in (1, len(components)):
raise ValueError(
'sources must be either a single source or a sequence of '
'sources with indexes corresponding to passed components.'
)
if not self.structure:
raise ftrack_api_old.exception.LocationError(
'No structure defined for location {location}.',
details=dict(location=self)
)
if not components:
# Optimisation: Return early when no components to process, such as
# when called recursively on an empty sequence component.
return
indent = ' ' * (_depth + 1)
# Check that components not already added to location.
existing_components = []
try:
self.get_resource_identifiers(components)
except ftrack_api_old.exception.ComponentNotInLocationError as error:
missing_component_ids = [
missing_component['id']
for missing_component in error.details['components']
]
for component in components:
if component['id'] not in missing_component_ids:
existing_components.append(component)
else:
existing_components.extend(components)
if existing_components:
# Some of the components already present in location.
raise ftrack_api_old.exception.ComponentInLocationError(
existing_components, self
)
# Attempt to transfer each component's data to this location.
transferred = []
for index, component in enumerate(components):
try:
# Determine appropriate source.
if sources_count == 1:
source = sources[0]
else:
source = sources[index]
# Add members first for container components.
is_container = 'members' in component.keys()
if is_container and recursive:
self.add_components(
component['members'], source, recursive=recursive,
_depth=(_depth + 1)
)
# Add component to this location.
context = self._get_context(component, source)
resource_identifier = self.structure.get_resource_identifier(
component, context
)
# Manage data transfer.
self._add_data(component, resource_identifier, source)
except Exception as error:
raise ftrack_api_old.exception.LocationError(
'Failed to transfer component {component} data to location '
'{location} due to error:\n{indent}{error}\n{indent}'
'Transferred component data that may require cleanup: '
'{transferred}',
details=dict(
indent=indent,
component=component,
location=self,
error=error,
transferred=transferred
)
)
else:
transferred.append((component, resource_identifier))
# Register all successfully transferred components.
components_to_register = []
component_resource_identifiers = []
try:
for component, resource_identifier in transferred:
if self.resource_identifier_transformer:
# Optionally encode resource identifier before storing.
resource_identifier = (
self.resource_identifier_transformer.encode(
resource_identifier,
context={'component': component}
)
)
components_to_register.append(component)
component_resource_identifiers.append(resource_identifier)
# Store component in location information.
self._register_components_in_location(
components, component_resource_identifiers
)
except Exception as error:
raise ftrack_api_old.exception.LocationError(
'Failed to register components with location {location} due to '
'error:\n{indent}{error}\n{indent}Transferred component data '
'that may require cleanup: {transferred}',
details=dict(
indent=indent,
location=self,
error=error,
transferred=transferred
)
)
# Publish events.
for component in components_to_register:
component_id = ftrack_api_old.inspection.primary_key(
component
).values()[0]
location_id = ftrack_api_old.inspection.primary_key(self).values()[0]
self.session.event_hub.publish(
ftrack_api_old.event.base.Event(
topic=ftrack_api_old.symbol.COMPONENT_ADDED_TO_LOCATION_TOPIC,
data=dict(
component_id=component_id,
location_id=location_id
),
),
on_error='ignore'
)
def _get_context(self, component, source):
'''Return context for *component* and *source*.'''
context = {}
if source:
try:
source_resource_identifier = source.get_resource_identifier(
component
)
except ftrack_api_old.exception.ComponentNotInLocationError:
pass
else:
context.update(dict(
source_resource_identifier=source_resource_identifier
))
return context
def _add_data(self, component, resource_identifier, source):
'''Manage transfer of *component* data from *source*.
*resource_identifier* specifies the identifier to use with this
location's accessor.
'''
self.logger.debug(L(
'Adding data for component {0!r} from source {1!r} to location '
'{2!r} using resource identifier {3!r}.',
component, resource_identifier, source, self
))
# Read data from source and write to this location.
if not source.accessor:
raise ftrack_api_old.exception.LocationError(
'No accessor defined for source location {location}.',
details=dict(location=source)
)
if not self.accessor:
raise ftrack_api_old.exception.LocationError(
'No accessor defined for target location {location}.',
details=dict(location=self)
)
is_container = 'members' in component.keys()
if is_container:
# TODO: Improve this check. Possibly introduce an inspection
# such as ftrack_api_old.inspection.is_sequence_component.
if component.entity_type != 'SequenceComponent':
self.accessor.make_container(resource_identifier)
else:
# Try to make container of component.
try:
container = self.accessor.get_container(
resource_identifier
)
except ftrack_api_old.exception.AccessorParentResourceNotFoundError:
# Container could not be retrieved from
# resource_identifier. Assume that there is no need to
# make the container.
pass
else:
# No need for existence check as make_container does not
# recreate existing containers.
self.accessor.make_container(container)
if self.accessor.exists(resource_identifier):
# Note: There is a race condition here in that the
# data may be added externally between the check for
# existence and the actual write which would still
# result in potential data loss. However, there is no
# good cross platform, cross accessor solution for this
# at present.
raise ftrack_api_old.exception.LocationError(
'Cannot add component as data already exists and '
'overwriting could result in data loss. Computed '
'target resource identifier was: {0}'
.format(resource_identifier)
)
# Read and write data.
source_data = source.accessor.open(
source.get_resource_identifier(component), 'rb'
)
target_data = self.accessor.open(resource_identifier, 'wb')
# Read/write data in chunks to avoid reading all into memory at the
# same time.
chunked_read = functools.partial(
source_data.read, ftrack_api_old.symbol.CHUNK_SIZE
)
for chunk in iter(chunked_read, ''):
target_data.write(chunk)
target_data.close()
source_data.close()
def _register_component_in_location(self, component, resource_identifier):
'''Register *component* in location against *resource_identifier*.'''
return self._register_components_in_location(
[component], [resource_identifier]
)
def _register_components_in_location(
self, components, resource_identifiers
):
'''Register *components* in location against *resource_identifiers*.
Indices of *components* and *resource_identifiers* should align.
'''
for component, resource_identifier in zip(
components, resource_identifiers
):
self.session.create(
'ComponentLocation', data=dict(
component=component,
location=self,
resource_identifier=resource_identifier
)
)
self.session.commit()
def remove_component(self, component, recursive=True):
'''Remove *component* from location.
.. note::
A :meth:`Session.commit<ftrack_api_old.session.Session.commit>` may be
automatically issued as part of the component deregistration.
'''
return self.remove_components([component], recursive=recursive)
def remove_components(self, components, recursive=True):
'''Remove *components* from location.
.. note::
A :meth:`Session.commit<ftrack_api_old.session.Session.commit>` may be
automatically issued as part of the components deregistration.
'''
for component in components:
# Check component is in this location
self.get_resource_identifier(component)
# Remove members first for container components.
is_container = 'members' in component.keys()
if is_container and recursive:
self.remove_components(
component['members'], recursive=recursive
)
# Remove data.
self._remove_data(component)
# Remove metadata.
self._deregister_component_in_location(component)
# Emit event.
component_id = ftrack_api_old.inspection.primary_key(
component
).values()[0]
location_id = ftrack_api_old.inspection.primary_key(self).values()[0]
self.session.event_hub.publish(
ftrack_api_old.event.base.Event(
topic=ftrack_api_old.symbol.COMPONENT_REMOVED_FROM_LOCATION_TOPIC,
data=dict(
component_id=component_id,
location_id=location_id
)
),
on_error='ignore'
)
def _remove_data(self, component):
'''Remove data associated with *component*.'''
if not self.accessor:
raise ftrack_api_old.exception.LocationError(
'No accessor defined for location {location}.',
details=dict(location=self)
)
try:
self.accessor.remove(
self.get_resource_identifier(component)
)
except ftrack_api_old.exception.AccessorResourceNotFoundError:
# If accessor does not support detecting sequence paths then an
# AccessorResourceNotFoundError is raised. For now, if the
# component type is 'SequenceComponent' assume success.
if not component.entity_type == 'SequenceComponent':
raise
def _deregister_component_in_location(self, component):
'''Deregister *component* from location.'''
component_id = ftrack_api_old.inspection.primary_key(component).values()[0]
location_id = ftrack_api_old.inspection.primary_key(self).values()[0]
# TODO: Use session.get for optimisation.
component_location = self.session.query(
'ComponentLocation where component_id is {0} and location_id is '
'{1}'.format(component_id, location_id)
)[0]
self.session.delete(component_location)
# TODO: Should auto-commit here be optional?
self.session.commit()
def get_component_availability(self, component):
'''Return availability of *component* in this location as a float.'''
return self.session.get_component_availability(
component, locations=[self]
)[self['id']]
def get_component_availabilities(self, components):
'''Return availabilities of *components* in this location.
Return list of float values corresponding to each component.
'''
return [
availability[self['id']] for availability in
self.session.get_component_availabilities(
components, locations=[self]
)
]
def get_resource_identifier(self, component):
'''Return resource identifier for *component*.
Raise :exc:`ftrack_api_old.exception.ComponentNotInLocationError` if the
component is not present in this location.
'''
return self.get_resource_identifiers([component])[0]
def get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api_old.exception.ComponentNotInLocationError` if any
of the components are not present in this location.
'''
resource_identifiers = self._get_resource_identifiers(components)
# Optionally decode resource identifier.
if self.resource_identifier_transformer:
for index, resource_identifier in enumerate(resource_identifiers):
resource_identifiers[index] = (
self.resource_identifier_transformer.decode(
resource_identifier
)
)
return resource_identifiers
def _get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api_old.exception.ComponentNotInLocationError` if any
of the components are not present in this location.
'''
component_ids_mapping = collections.OrderedDict()
for component in components:
component_id = ftrack_api_old.inspection.primary_key(
component
).values()[0]
component_ids_mapping[component_id] = component
component_locations = self.session.query(
'select component_id, resource_identifier from ComponentLocation '
'where location_id is {0} and component_id in ({1})'
.format(
ftrack_api_old.inspection.primary_key(self).values()[0],
', '.join(component_ids_mapping.keys())
)
)
resource_identifiers_map = {}
for component_location in component_locations:
resource_identifiers_map[component_location['component_id']] = (
component_location['resource_identifier']
)
resource_identifiers = []
missing = []
for component_id, component in component_ids_mapping.items():
if component_id not in resource_identifiers_map:
missing.append(component)
else:
resource_identifiers.append(
resource_identifiers_map[component_id]
)
if missing:
raise ftrack_api_old.exception.ComponentNotInLocationError(
missing, self
)
return resource_identifiers
def get_filesystem_path(self, component):
'''Return filesystem path for *component*.'''
return self.get_filesystem_paths([component])[0]
def get_filesystem_paths(self, components):
'''Return filesystem paths for *components*.'''
resource_identifiers = self.get_resource_identifiers(components)
filesystem_paths = []
for resource_identifier in resource_identifiers:
filesystem_paths.append(
self.accessor.get_filesystem_path(resource_identifier)
)
return filesystem_paths
def get_url(self, component):
'''Return url for *component*.
Raise :exc:`~ftrack_api_old.exception.AccessorFilesystemPathError` if
URL could not be determined from *component* or
:exc:`~ftrack_api_old.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by the location's accessor.
'''
resource_identifier = self.get_resource_identifier(component)
return self.accessor.get_url(resource_identifier)
class MemoryLocationMixin(object):
'''Represent storage for components.
Unlike a standard location, only store metadata for components in this
location in memory rather than persisting to the database.
'''
@property
def _cache(self):
'''Return cache.'''
try:
cache = self.__cache
except AttributeError:
cache = self.__cache = {}
return cache
def _register_component_in_location(self, component, resource_identifier):
'''Register *component* in location with *resource_identifier*.'''
component_id = ftrack_api_old.inspection.primary_key(component).values()[0]
self._cache[component_id] = resource_identifier
def _register_components_in_location(
self, components, resource_identifiers
):
'''Register *components* in location against *resource_identifiers*.
Indices of *components* and *resource_identifiers* should align.
'''
for component, resource_identifier in zip(
components, resource_identifiers
):
self._register_component_in_location(component, resource_identifier)
def _deregister_component_in_location(self, component):
'''Deregister *component* in location.'''
component_id = ftrack_api_old.inspection.primary_key(component).values()[0]
self._cache.pop(component_id)
def _get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api_old.exception.ComponentNotInLocationError` if any
of the referenced components are not present in this location.
'''
resource_identifiers = []
missing = []
for component in components:
component_id = ftrack_api_old.inspection.primary_key(
component
).values()[0]
resource_identifier = self._cache.get(component_id)
if resource_identifier is None:
missing.append(component)
else:
resource_identifiers.append(resource_identifier)
if missing:
raise ftrack_api_old.exception.ComponentNotInLocationError(
missing, self
)
return resource_identifiers
class UnmanagedLocationMixin(object):
'''Location that does not manage data.'''
def _add_data(self, component, resource_identifier, source):
'''Manage transfer of *component* data from *source*.
*resource_identifier* specifies the identifier to use with this
locations accessor.
Overridden to have no effect.
'''
return
def _remove_data(self, component):
'''Remove data associated with *component*.
Overridden to have no effect.
'''
return
class OriginLocationMixin(MemoryLocationMixin, UnmanagedLocationMixin):
'''Special origin location that expects sources as filepaths.'''
def _get_context(self, component, source):
'''Return context for *component* and *source*.'''
context = {}
if source:
context.update(dict(
source_resource_identifier=source
))
return context
class ServerLocationMixin(object):
'''Location representing ftrack server.
Adds convenience methods to location, specific to ftrack server.
'''
def get_thumbnail_url(self, component, size=None):
'''Return thumbnail url for *component*.
Optionally, specify *size* to constrain the downscaled image to size
x size pixels.
Raise :exc:`~ftrack_api_old.exception.AccessorFilesystemPathError` if
URL could not be determined from *resource_identifier* or
:exc:`~ftrack_api_old.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by the location's accessor.
'''
resource_identifier = self.get_resource_identifier(component)
return self.accessor.get_thumbnail_url(resource_identifier, size)
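
# Illustrative usage sketch, not part of the vendored module. Assumes the
# vendored package exposes Session like upstream ftrack_api, that the named
# locations exist, and that create_component() registers the new component in
# the special origin location as upstream does. Path and location name are
# placeholders.
#
# import ftrack_api_old
#
# session = ftrack_api_old.Session()
# origin = session.query('Location where name is "ftrack.origin"').one()
# target = session.query('Location where name is "studio.disk"').one()
#
# # Register a component without adding it to a managed location, then
# # transfer and register it in the target location using origin as source.
# component = session.create_component('/tmp/render_v001.mov', location=None)
# target.add_component(component, origin)
# print(target.get_filesystem_path(component))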

View file

@@ -0,0 +1,68 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api_old.entity.base
class Note(ftrack_api_old.entity.base.Entity):
'''Represent a note.'''
def create_reply(
self, content, author
):
'''Create a reply with *content* and *author*.
.. note::
This is a helper method. To create replies manually use the
standard :meth:`Session.create` method.
'''
reply = self.session.create(
'Note', {
'author': author,
'content': content
}
)
self['replies'].append(reply)
return reply
class CreateNoteMixin(object):
'''Mixin to add create_note method on entity class.'''
def create_note(self, content, author, recipients=None, category=None):
'''Create note with *content*, *author*.
Note category can be set by including *category* and *recipients*
can be specified as a list of user or group instances.
'''
if not recipients:
recipients = []
category_id = None
if category:
category_id = category['id']
data = {
'content': content,
'author': author,
'category_id': category_id
}
note = self.session.create('Note', data)
self['notes'].append(note)
for resource in recipients:
recipient = self.session.create('Recipient', {
'note_id': note['id'],
'resource_id': resource['id']
})
note['recipients'].append(recipient)
return note
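
# Illustrative usage sketch, not part of the vendored module. Assumes a
# configured Session and that Task entities receive CreateNoteMixin, as in
# upstream ftrack_api; the query values are placeholders.
#
# import ftrack_api_old
#
# session = ftrack_api_old.Session()
# task = session.query('Task').first()
# author = session.query('User where username is "jane.doe"').one()
#
# note = task.create_note('Please review the latest version.', author=author)
# reply = note.create_reply('Looks good to me.', author=author)
# session.commit()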

View file

@@ -0,0 +1,94 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api_old.entity.base
class ProjectSchema(ftrack_api_old.entity.base.Entity):
'''Class representing ProjectSchema.'''
def get_statuses(self, schema, type_id=None):
'''Return statuses for *schema* and optional *type_id*.
*type_id* is the id of the Type for a TypedContext and can be used to
get statuses where the workflow has been overridden.
'''
# Task has overrides and needs to be handled separately.
if schema == 'Task':
if type_id is not None:
overrides = self['_overrides']
for override in overrides:
if override['type_id'] == type_id:
return override['workflow_schema']['statuses'][:]
return self['_task_workflow']['statuses'][:]
elif schema == 'AssetVersion':
return self['_version_workflow']['statuses'][:]
else:
try:
EntityTypeClass = self.session.types[schema]
except KeyError:
raise ValueError('Schema {0} does not exist.'.format(schema))
object_type_id_attribute = EntityTypeClass.attributes.get(
'object_type_id'
)
try:
object_type_id = object_type_id_attribute.default_value
except AttributeError:
raise ValueError(
'Schema {0} does not have statuses.'.format(schema)
)
for _schema in self['_schemas']:
if _schema['type_id'] == object_type_id:
result = self.session.query(
'select task_status from SchemaStatus '
'where schema_id is {0}'.format(_schema['id'])
)
return [
schema_type['task_status'] for schema_type in result
]
raise ValueError(
'No valid statuses were found for schema {0}.'.format(schema)
)
def get_types(self, schema):
'''Return types for *schema*.'''
# Task needs to be handled separately.
if schema == 'Task':
return self['_task_type_schema']['types'][:]
else:
try:
EntityTypeClass = self.session.types[schema]
except KeyError:
raise ValueError('Schema {0} does not exist.'.format(schema))
object_type_id_attribute = EntityTypeClass.attributes.get(
'object_type_id'
)
try:
object_type_id = object_type_id_attribute.default_value
except AttributeError:
raise ValueError(
'Schema {0} does not have types.'.format(schema)
)
for _schema in self['_schemas']:
if _schema['type_id'] == object_type_id:
result = self.session.query(
'select task_type from SchemaType '
'where schema_id is {0}'.format(_schema['id'])
)
return [schema_type['task_type'] for schema_type in result]
raise ValueError(
'No valid types were found for schema {0}.'.format(schema)
)
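
# Illustrative usage sketch, not part of the vendored module. Assumes a
# configured Session and that the project exposes a 'project_schema'
# relation as in upstream ftrack_api; the project name is a placeholder.
#
# import ftrack_api_old
#
# session = ftrack_api_old.Session()
# project = session.query('Project where full_name is "my_project"').one()
# schema = project['project_schema']
#
# # Statuses and types the schema allows for Task entities.
# print([status['name'] for status in schema.get_statuses('Task')])
# print([type_['name'] for type_ in schema.get_types('Task')])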

View file

@@ -0,0 +1,123 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import arrow
import ftrack_api_old.entity.base
import ftrack_api_old.exception
class User(ftrack_api_old.entity.base.Entity):
'''Represent a user.'''
def start_timer(self, context=None, comment='', name=None, force=False):
'''Start a timer for *context* and return it.
*force* can be used to automatically stop an existing timer and create a
timelog for it. If you need to get access to the created timelog, use
:func:`stop_timer` instead.
*comment* and *name* are optional but will be set on the timer.
.. note::
This method will automatically commit the changes and if *force* is
False then it will fail with a
:class:`ftrack_api_old.exception.NotUniqueError` exception if a
timer is already running.
'''
if force:
try:
self.stop_timer()
except ftrack_api_old.exception.NoResultFoundError:
self.logger.debug('Failed to stop existing timer.')
timer = self.session.create('Timer', {
'user': self,
'context': context,
'name': name,
'comment': comment
})
# Commit the new timer and try to catch any error that indicates another
# timelog already exists and inform the user about it.
try:
self.session.commit()
except ftrack_api_old.exception.ServerError as error:
if 'IntegrityError' in str(error):
raise ftrack_api_old.exception.NotUniqueError(
('Failed to start a timelog for user with id: {0}, it is '
'likely that a timer is already running. Either use '
'force=True or stop the timer first.').format(self['id'])
)
else:
# Reraise the error as it might be something unrelated.
raise
return timer
def stop_timer(self):
'''Stop the current timer and return a timelog created from it.
If a timer is not running, a
:exc:`ftrack_api_old.exception.NoResultFoundError` exception will be
raised.
.. note::
This method will automatically commit the changes.
'''
timer = self.session.query(
'Timer where user_id = "{0}"'.format(self['id'])
).one()
# If the server is running in the same timezone as the local
# timezone, we remove the TZ offset to get the correct duration.
is_timezone_support_enabled = self.session.server_information.get(
'is_timezone_support_enabled', None
)
if is_timezone_support_enabled is None:
self.logger.warning(
'Could not identify if server has timezone support enabled. '
'Will assume server is running in UTC.'
)
is_timezone_support_enabled = True
if is_timezone_support_enabled:
now = arrow.now()
else:
now = arrow.now().replace(tzinfo='utc')
delta = now - timer['start']
duration = delta.days * 24 * 60 * 60 + delta.seconds
timelog = self.session.create('Timelog', {
'user_id': timer['user_id'],
'context_id': timer['context_id'],
'comment': timer['comment'],
'start': timer['start'],
'duration': duration,
'name': timer['name']
})
self.session.delete(timer)
self.session.commit()
return timelog
def send_invite(self):
'''Send an invitation email to the user.'''
self.session.send_user_invite(
self
)
def reset_api_key(self):
'''Reset the user's API key.'''
response = self.session.reset_remote(
'api_key', entity=self
)
return response['api_key']
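
# Illustrative usage sketch, not part of the vendored module. Assumes a
# configured Session and an existing task; the username is a placeholder.
#
# import ftrack_api_old
#
# session = ftrack_api_old.Session()
# user = session.query('User where username is "jane.doe"').one()
# task = session.query('Task').first()
#
# # Start a timer (stopping any running one), then stop it to get a Timelog.
# user.start_timer(task, comment='Lighting pass', force=True)
# timelog = user.stop_timer()
# print(timelog['duration'])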

View file

@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@@ -0,0 +1,85 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import uuid
import collections
class Event(collections.MutableMapping):
'''Represent a single event.'''
def __init__(self, topic, id=None, data=None, sent=None,
source=None, target='', in_reply_to_event=None):
'''Initialise event.
*topic* is the required topic for the event. It can use a dotted
notation to demarcate groupings. For example, 'ftrack.update'.
*id* is the unique id for this event instance. It is primarily used when
replying to an event. If not supplied a default uuid based value will
be used.
*data* refers to event specific data. It should be a mapping structure
and defaults to an empty dictionary if not supplied.
*sent* is the timestamp the event is sent. It will be set automatically
as send time unless specified here.
*source* is information about where the event originated. It should be
a mapping and include at least a unique id value under an 'id' key. If
not specified, senders usually populate the value automatically at
publish time.
*target* can be an expression that targets this event. For example,
a reply event would target the event to the sender of the source event.
The expression will be tested against subscriber information only.
*in_reply_to_event* is used when replying to an event and should contain
the unique id of the event being replied to.
'''
super(Event, self).__init__()
self._data = dict(
id=id or uuid.uuid4().hex,
data=data or {},
topic=topic,
sent=sent,
source=source or {},
target=target,
in_reply_to_event=in_reply_to_event
)
self._stopped = False
def stop(self):
'''Stop further processing of this event.'''
self._stopped = True
def is_stopped(self):
'''Return whether event has been stopped.'''
return self._stopped
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data)
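
# Illustrative sketch, not part of the vendored module: constructing an event
# and using its mapping interface. Topic and data values are placeholders.
#
# from ftrack_api_old.event.base import Event
#
# event = Event(
#     topic='ftrack.update',
#     data={'entities': [{'entityType': 'task', 'action': 'update'}]}
# )
# print(event['topic'])
# event['target'] = 'applicationId=python'
# event.stop()
# print(event.is_stopped())  # True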

View file

@@ -0,0 +1,281 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from operator import eq, ne, ge, le, gt, lt
from pyparsing import (ParserElement, Group, Word, CaselessKeyword, Forward,
FollowedBy, Suppress, oneOf, OneOrMore, Optional,
alphanums, quotedString, removeQuotes)
import ftrack_api_old.exception
# Optimise parsing using packrat memoisation feature.
ParserElement.enablePackrat()
class Parser(object):
'''Parse string based expression into :class:`Expression` instance.'''
def __init__(self):
'''Initialise parser.'''
self._operators = {
'=': eq,
'!=': ne,
'>=': ge,
'<=': le,
'>': gt,
'<': lt
}
self._parser = self._construct_parser()
super(Parser, self).__init__()
def _construct_parser(self):
'''Construct and return parser.'''
field = Word(alphanums + '_.')
operator = oneOf(self._operators.keys())
value = Word(alphanums + '-_,./*@+')
quoted_value = quotedString('quoted_value').setParseAction(removeQuotes)
condition = Group(
field + operator + (quoted_value | value)
)('condition')
not_ = Optional(Suppress(CaselessKeyword('not')))('not')
and_ = Suppress(CaselessKeyword('and'))('and')
or_ = Suppress(CaselessKeyword('or'))('or')
expression = Forward()
parenthesis = Suppress('(') + expression + Suppress(')')
previous = condition | parenthesis
for conjunction in (not_, and_, or_):
current = Forward()
if conjunction in (and_, or_):
conjunction_expression = (
FollowedBy(previous + conjunction + previous)
+ Group(
previous + OneOrMore(conjunction + previous)
)(conjunction.resultsName)
)
elif conjunction in (not_, ):
conjunction_expression = (
FollowedBy(conjunction.expr + current)
+ Group(conjunction + current)(conjunction.resultsName)
)
else: # pragma: no cover
raise ValueError('Unrecognised conjunction.')
current <<= (conjunction_expression | previous)
previous = current
expression <<= previous
return expression('expression')
def parse(self, expression):
'''Parse string *expression* into :class:`Expression`.
Raise :exc:`ftrack_api_old.exception.ParseError` if *expression* could
not be parsed.
'''
result = None
expression = expression.strip()
if expression:
try:
result = self._parser.parseString(
expression, parseAll=True
)
except Exception as error:
raise ftrack_api_old.exception.ParseError(
'Failed to parse: {0}. {1}'.format(expression, error)
)
return self._process(result)
def _process(self, result):
'''Process *result* using appropriate method.
Method called is determined by the name of the result.
'''
method_name = '_process_{0}'.format(result.getName())
method = getattr(self, method_name)
return method(result)
def _process_expression(self, result):
'''Process *result* as expression.'''
return self._process(result[0])
def _process_not(self, result):
'''Process *result* as NOT operation.'''
return Not(self._process(result[0]))
def _process_and(self, result):
'''Process *result* as AND operation.'''
return All([self._process(entry) for entry in result])
def _process_or(self, result):
'''Process *result* as OR operation.'''
return Any([self._process(entry) for entry in result])
def _process_condition(self, result):
'''Process *result* as condition.'''
key, operator, value = result
return Condition(key, self._operators[operator], value)
def _process_quoted_value(self, result):
'''Process *result* as quoted value.'''
return result
class Expression(object):
'''Represent a structured expression to test candidates against.'''
def __str__(self):
'''Return string representation.'''
return '<{0}>'.format(self.__class__.__name__)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return True
class All(Expression):
'''Match candidate that matches all of the specified expressions.
.. note::
If no expressions are supplied then the expression will always match.
'''
def __init__(self, expressions=None):
'''Initialise with list of *expressions* to match against.'''
self._expressions = expressions or []
super(All, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} [{1}]>'.format(
self.__class__.__name__,
' '.join(map(str, self._expressions))
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return all([
expression.match(candidate) for expression in self._expressions
])
class Any(Expression):
'''Match candidate that matches any of the specified expressions.
.. note::
If no expressions are supplied then the expression will never match.
'''
def __init__(self, expressions=None):
'''Initialise with list of *expressions* to match against.'''
self._expressions = expressions or []
super(Any, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} [{1}]>'.format(
self.__class__.__name__,
' '.join(map(str, self._expressions))
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return any([
expression.match(candidate) for expression in self._expressions
])
class Not(Expression):
'''Negate expression.'''
def __init__(self, expression):
'''Initialise with *expression* to negate.'''
self._expression = expression
super(Not, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__,
self._expression
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return not self._expression.match(candidate)
class Condition(Expression):
'''Represent condition.'''
def __init__(self, key, operator, value):
'''Initialise condition.
*key* is the key to check on the data when matching. It can be a nested
key represented by dots. For example, 'data.eventType' would attempt to
match candidate['data']['eventType']. If the candidate is missing any
of the requested keys then the match fails immediately.
*operator* is the operator function to use to perform the match between
the retrieved candidate value and the conditional *value*.
If *value* is a string, it can use a wildcard '*' at the end to denote
that any values matching the substring portion are valid when matching
equality only.
'''
self._key = key
self._operator = operator
self._value = value
self._wildcard = '*'
self._operatorMapping = {
eq: '=',
ne: '!=',
ge: '>=',
le: '<=',
gt: '>',
lt: '<'
}
def __str__(self):
'''Return string representation.'''
return '<{0} {1}{2}{3}>'.format(
self.__class__.__name__,
self._key,
self._operatorMapping.get(self._operator, self._operator),
self._value
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
key_parts = self._key.split('.')
try:
value = candidate
for keyPart in key_parts:
value = value[keyPart]
except (KeyError, TypeError):
return False
if (
self._operator is eq
and isinstance(self._value, basestring)
and self._value[-1] == self._wildcard
):
return self._value[:-1] in value
else:
return self._operator(value, self._value)
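
# Illustrative sketch, not part of the vendored module: parsing an expression
# string and matching it against candidate event-like mappings. The trailing
# '*' in the topic value uses the wildcard substring match described above.
#
# from ftrack_api_old.event.expression import Parser
#
# parser = Parser()
# expression = parser.parse('topic=ftrack.* and data.entityType=task')
#
# print(expression.match(
#     {'topic': 'ftrack.update', 'data': {'entityType': 'task'}}
# ))  # True
# print(expression.match({'topic': 'ftrack.update', 'data': {}}))  # False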

1053
pype/vendor/ftrack_api_old/event/hub.py vendored Normal file

File diff suppressed because it is too large

View file

@@ -0,0 +1,27 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import ftrack_api_old.event.subscription
class Subscriber(object):
'''Represent event subscriber.'''
def __init__(self, subscription, callback, metadata, priority):
'''Initialise subscriber.'''
self.subscription = ftrack_api_old.event.subscription.Subscription(
subscription
)
self.callback = callback
self.metadata = metadata
self.priority = priority
def __str__(self):
'''Return string representation.'''
return '<{0} metadata={1} subscription="{2}">'.format(
self.__class__.__name__, self.metadata, self.subscription
)
def interested_in(self, event):
'''Return whether subscriber interested in *event*.'''
return self.subscription.includes(event)

View file

@@ -0,0 +1,23 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import ftrack_api_old.event.expression
class Subscription(object):
'''Represent a subscription.'''
parser = ftrack_api_old.event.expression.Parser()
def __init__(self, subscription):
'''Initialise with *subscription*.'''
self._subscription = subscription
self._expression = self.parser.parse(subscription)
def __str__(self):
'''Return string representation.'''
return self._subscription
def includes(self, event):
'''Return whether subscription includes *event*.'''
return self._expression.match(event)
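
# Illustrative sketch, not part of the vendored module: a subscription wraps
# an expression string and can test whether an event falls within it.
#
# from ftrack_api_old.event.base import Event
# from ftrack_api_old.event.subscription import Subscription
#
# subscription = Subscription('topic=ftrack.update')
# print(subscription.includes(Event(topic='ftrack.update')))         # True
# print(subscription.includes(Event(topic='ftrack.action.launch')))  # False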

392
pype/vendor/ftrack_api_old/exception.py vendored Normal file
View file

@@ -0,0 +1,392 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import sys
import traceback
import ftrack_api_old.entity.base
class Error(Exception):
'''ftrack specific error.'''
default_message = 'Unspecified error occurred.'
def __init__(self, message=None, details=None):
'''Initialise exception with *message*.
If *message* is None, the class 'default_message' will be used.
*details* should be a mapping of extra information that can be used in
the message and also to provide more context.
'''
if message is None:
message = self.default_message
self.message = message
self.details = details
if self.details is None:
self.details = {}
self.traceback = traceback.format_exc()
def __str__(self):
'''Return string representation.'''
keys = {}
for key, value in self.details.iteritems():
if isinstance(value, unicode):
value = value.encode(sys.getfilesystemencoding())
keys[key] = value
return str(self.message.format(**keys))
class AuthenticationError(Error):
'''Raise when an authentication error occurs.'''
default_message = 'Authentication error.'
class ServerError(Error):
'''Raise when the server reports an error.'''
default_message = 'Server reported error processing request.'
class ServerCompatibilityError(ServerError):
'''Raise when server appears incompatible.'''
default_message = 'Server incompatible.'
class NotFoundError(Error):
'''Raise when something that should exist is not found.'''
default_message = 'Not found.'
class NotUniqueError(Error):
'''Raise when unique value required and duplicate detected.'''
default_message = 'Non-unique value detected.'
class IncorrectResultError(Error):
'''Raise when a result is incorrect.'''
default_message = 'Incorrect result detected.'
class NoResultFoundError(IncorrectResultError):
'''Raise when a result was expected but no result was found.'''
default_message = 'Expected result, but no result was found.'
class MultipleResultsFoundError(IncorrectResultError):
'''Raise when a single result expected, but multiple results found.'''
default_message = 'Expected single result, but received multiple results.'
class EntityTypeError(Error):
'''Raise when an entity type error occurs.'''
default_message = 'Entity type error.'
class UnrecognisedEntityTypeError(EntityTypeError):
'''Raise when an unrecognised entity type detected.'''
default_message = 'Entity type "{entity_type}" not recognised.'
def __init__(self, entity_type, **kw):
'''Initialise with *entity_type* that is unrecognised.'''
kw.setdefault('details', {}).update(dict(
entity_type=entity_type
))
super(UnrecognisedEntityTypeError, self).__init__(**kw)
class OperationError(Error):
'''Raise when an operation error occurs.'''
default_message = 'Operation error.'
class InvalidStateError(Error):
'''Raise when an invalid state detected.'''
default_message = 'Invalid state.'
class InvalidStateTransitionError(InvalidStateError):
'''Raise when an invalid state transition detected.'''
default_message = (
'Invalid transition from {current_state!r} to {target_state!r} state '
'for entity {entity!r}'
)
def __init__(self, current_state, target_state, entity, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
current_state=current_state,
target_state=target_state,
entity=entity
))
super(InvalidStateTransitionError, self).__init__(**kw)
class AttributeError(Error):
'''Raise when an error related to an attribute occurs.'''
default_message = 'Attribute error.'
class ImmutableAttributeError(AttributeError):
'''Raise when modification of immutable attribute attempted.'''
default_message = (
'Cannot modify value of immutable {attribute.name!r} attribute.'
)
def __init__(self, attribute, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
attribute=attribute
))
super(ImmutableAttributeError, self).__init__(**kw)
class CollectionError(Error):
'''Raise when an error related to collections occurs.'''
default_message = 'Collection error.'
def __init__(self, collection, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
collection=collection
))
super(CollectionError, self).__init__(**kw)
class ImmutableCollectionError(CollectionError):
'''Raise when modification of immutable collection attempted.'''
default_message = (
'Cannot modify value of immutable collection {collection!r}.'
)
class DuplicateItemInCollectionError(CollectionError):
'''Raise when duplicate item in collection detected.'''
default_message = (
'Item {item!r} already exists in collection {collection!r}.'
)
def __init__(self, item, collection, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
item=item
))
super(DuplicateItemInCollectionError, self).__init__(collection, **kw)
class ParseError(Error):
'''Raise when a parsing error occurs.'''
default_message = 'Failed to parse.'
class EventHubError(Error):
'''Raise when issues related to event hub occur.'''
default_message = 'Event hub error occurred.'
class EventHubConnectionError(EventHubError):
'''Raise when event hub encounters connection problem.'''
default_message = 'Event hub is not connected.'
class EventHubPacketError(EventHubError):
'''Raise when event hub encounters an issue with a packet.'''
default_message = 'Invalid packet.'
class PermissionDeniedError(Error):
'''Raise when permission is denied.'''
default_message = 'Permission denied.'
class LocationError(Error):
'''Base for errors associated with locations.'''
default_message = 'Unspecified location error'
class ComponentNotInAnyLocationError(LocationError):
'''Raise when component not available in any location.'''
default_message = 'Component not available in any location.'
class ComponentNotInLocationError(LocationError):
'''Raise when component(s) not in location.'''
default_message = (
'Component(s) {formatted_components} not found in location {location}.'
)
def __init__(self, components, location, **kw):
'''Initialise with *components* and *location*.'''
if isinstance(components, ftrack_api_old.entity.base.Entity):
components = [components]
kw.setdefault('details', {}).update(dict(
components=components,
formatted_components=', '.join(
[str(component) for component in components]
),
location=location
))
super(ComponentNotInLocationError, self).__init__(**kw)
class ComponentInLocationError(LocationError):
'''Raise when component(s) already exists in location.'''
default_message = (
'Component(s) {formatted_components} already exist in location '
'{location}.'
)
def __init__(self, components, location, **kw):
'''Initialise with *components* and *location*.'''
if isinstance(components, ftrack_api_old.entity.base.Entity):
components = [components]
kw.setdefault('details', {}).update(dict(
components=components,
formatted_components=', '.join(
[str(component) for component in components]
),
location=location
))
super(ComponentInLocationError, self).__init__(**kw)
class AccessorError(Error):
'''Base for errors associated with accessors.'''
default_message = 'Unspecified accessor error'
class AccessorOperationFailedError(AccessorError):
'''Base for failed operations on accessors.'''
default_message = 'Operation {operation} failed: {error}'
def __init__(
self, operation='', resource_identifier=None, error=None, **kw
):
kw.setdefault('details', {}).update(dict(
operation=operation,
resource_identifier=resource_identifier,
error=error
))
super(AccessorOperationFailedError, self).__init__(**kw)
class AccessorUnsupportedOperationError(AccessorOperationFailedError):
'''Raise when operation is unsupported.'''
default_message = 'Operation {operation} unsupported.'
class AccessorPermissionDeniedError(AccessorOperationFailedError):
'''Raise when permission denied.'''
default_message = (
'Cannot {operation} {resource_identifier}. Permission denied.'
)
class AccessorResourceIdentifierError(AccessorError):
'''Raise when a error related to a resource_identifier occurs.'''
default_message = 'Resource identifier is invalid: {resource_identifier}.'
def __init__(self, resource_identifier, **kw):
kw.setdefault('details', {}).update(dict(
resource_identifier=resource_identifier
))
super(AccessorResourceIdentifierError, self).__init__(**kw)
class AccessorFilesystemPathError(AccessorResourceIdentifierError):
'''Raise when a error related to an accessor filesystem path occurs.'''
default_message = (
'Could not determine filesystem path from resource identifier: '
'{resource_identifier}.'
)
class AccessorResourceError(AccessorError):
'''Base for errors associated with specific resource.'''
default_message = 'Unspecified resource error: {resource_identifier}'
def __init__(self, operation='', resource_identifier=None, error=None,
**kw):
kw.setdefault('details', {}).update(dict(
operation=operation,
resource_identifier=resource_identifier
))
super(AccessorResourceError, self).__init__(**kw)
class AccessorResourceNotFoundError(AccessorResourceError):
'''Raise when a required resource is not found.'''
default_message = 'Resource not found: {resource_identifier}'
class AccessorParentResourceNotFoundError(AccessorResourceError):
'''Raise when a parent resource (such as directory) is not found.'''
default_message = 'Parent resource is missing: {resource_identifier}'
class AccessorResourceInvalidError(AccessorResourceError):
'''Raise when a resource is not the right type.'''
default_message = 'Resource invalid: {resource_identifier}'
class AccessorContainerNotEmptyError(AccessorResourceError):
'''Raise when container is not empty.'''
default_message = 'Container is not empty: {resource_identifier}'
class StructureError(Error):
'''Base for errors associated with structures.'''
default_message = 'Unspecified structure error'
class ConnectionClosedError(Error):
'''Raise when attempt to use closed connection detected.'''
default_message = "Connection closed."
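
# Illustrative sketch, not part of the vendored module: error messages are
# format strings filled in from the 'details' mapping. The path below is a
# placeholder.
#
# from ftrack_api_old.exception import AccessorResourceNotFoundError
#
# try:
#     raise AccessorResourceNotFoundError(
#         resource_identifier='/tmp/missing/file.exr'
#     )
# except AccessorResourceNotFoundError as error:
#     print(error)  # Resource not found: /tmp/missing/file.exr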

131
pype/vendor/ftrack_api_old/formatter.py vendored Normal file
View file

@@ -0,0 +1,131 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import termcolor
import ftrack_api_old.entity.base
import ftrack_api_old.collection
import ftrack_api_old.symbol
import ftrack_api_old.inspection
#: Useful filters to pass to :func:`format`.
FILTER = {
'ignore_unset': (
lambda entity, name, value: value is not ftrack_api_old.symbol.NOT_SET
)
}
def format(
entity, formatters=None, attribute_filter=None, recursive=False,
indent=0, indent_first_line=True, _seen=None
):
'''Return formatted string representing *entity*.
*formatters* can be used to customise formatting of elements. It should be a
mapping with one or more of the following keys:
* header - Used to format entity type.
* label - Used to format attribute names.
Specify an *attribute_filter* to control which attributes to include. By
default all attributes are included. The *attribute_filter* should be a
callable that accepts `(entity, attribute_name, attribute_value)` and
returns True if the attribute should be included in the output. For example,
to filter out all unset values::
attribute_filter=ftrack_api_old.formatter.FILTER['ignore_unset']
If *recursive* is True then recurse into Collections and format each entity
present.
*indent* specifies the overall indentation in spaces of the formatted text,
whilst *indent_first_line* determines whether to apply that indent to the
first generated line.
.. warning::
Iterates over all *entity* attributes which may cause multiple queries
to the server. Turn off auto populating in the session to prevent this.
'''
# Initialise default formatters.
if formatters is None:
formatters = dict()
formatters.setdefault(
'header', lambda text: termcolor.colored(
text, 'white', 'on_blue', attrs=['bold']
)
)
formatters.setdefault(
'label', lambda text: termcolor.colored(
text, 'blue', attrs=['bold']
)
)
# Determine indents.
spacer = ' ' * indent
if indent_first_line:
first_line_spacer = spacer
else:
first_line_spacer = ''
# Avoid infinite recursion on circular references.
if _seen is None:
_seen = set()
identifier = str(ftrack_api_old.inspection.identity(entity))
if identifier in _seen:
return (
first_line_spacer +
formatters['header'](entity.entity_type) + '{...}'
)
_seen.add(identifier)
information = list()
information.append(
first_line_spacer + formatters['header'](entity.entity_type)
)
for key, value in sorted(entity.items()):
if attribute_filter is not None:
if not attribute_filter(entity, key, value):
continue
child_indent = indent + len(key) + 3
if isinstance(value, ftrack_api_old.entity.base.Entity):
value = format(
value,
formatters=formatters,
attribute_filter=attribute_filter,
recursive=recursive,
indent=child_indent,
indent_first_line=False,
_seen=_seen.copy()
)
if isinstance(value, ftrack_api_old.collection.Collection):
if recursive:
child_values = []
for index, child in enumerate(value):
child_value = format(
child,
formatters=formatters,
attribute_filter=attribute_filter,
recursive=recursive,
indent=child_indent,
indent_first_line=index != 0,
_seen=_seen.copy()
)
child_values.append(child_value)
value = '\n'.join(child_values)
information.append(
spacer + u' {0}: {1}'.format(formatters['label'](key), value)
)
return '\n'.join(information)
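
# Illustrative usage sketch, not part of the vendored module. Assumes a
# configured Session that accepts auto_populate like upstream ftrack_api;
# disabling auto population avoids the extra server queries warned about in
# the docstring above.
#
# import ftrack_api_old
# import ftrack_api_old.formatter
#
# session = ftrack_api_old.Session(auto_populate=False)
# task = session.query('Task').first()
# print(ftrack_api_old.formatter.format(
#     task,
#     attribute_filter=ftrack_api_old.formatter.FILTER['ignore_unset']
# ))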

135
pype/vendor/ftrack_api_old/inspection.py vendored Normal file
View file

@@ -0,0 +1,135 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import collections
import ftrack_api_old.symbol
import ftrack_api_old.operation
def identity(entity):
'''Return unique identity of *entity*.'''
return (
str(entity.entity_type),
primary_key(entity).values()
)
def primary_key(entity):
'''Return primary key of *entity* as an ordered mapping of {field: value}.
To get just the primary key values::
primary_key(entity).values()
'''
primary_key = collections.OrderedDict()
for name in entity.primary_key_attributes:
value = entity[name]
if value is ftrack_api_old.symbol.NOT_SET:
raise KeyError(
'Missing required value for primary key attribute "{0}" on '
'entity {1!r}.'.format(name, entity)
)
primary_key[str(name)] = str(value)
return primary_key
def _state(operation, state):
'''Return state following *operation* against current *state*.'''
if (
isinstance(
operation, ftrack_api_old.operation.CreateEntityOperation
)
and state is ftrack_api_old.symbol.NOT_SET
):
state = ftrack_api_old.symbol.CREATED
elif (
isinstance(
operation, ftrack_api_old.operation.UpdateEntityOperation
)
and state is ftrack_api_old.symbol.NOT_SET
):
state = ftrack_api_old.symbol.MODIFIED
elif isinstance(
operation, ftrack_api_old.operation.DeleteEntityOperation
):
state = ftrack_api_old.symbol.DELETED
return state
def state(entity):
'''Return current *entity* state.
.. seealso:: :func:`ftrack_api_old.inspection.states`.
'''
value = ftrack_api_old.symbol.NOT_SET
for operation in entity.session.recorded_operations:
# Determine if operation refers to an entity and whether that entity
# is *entity*.
if (
isinstance(
operation,
(
ftrack_api_old.operation.CreateEntityOperation,
ftrack_api_old.operation.UpdateEntityOperation,
ftrack_api_old.operation.DeleteEntityOperation
)
)
and operation.entity_type == entity.entity_type
and operation.entity_key == primary_key(entity)
):
value = _state(operation, value)
return value
def states(entities):
'''Return current states of *entities*.
An optimised function for determining states of multiple entities in one
go.
.. note::
All *entities* should belong to the same session.
.. seealso:: :func:`ftrack_api_old.inspection.state`.
'''
if not entities:
return []
session = entities[0].session
entities_by_identity = collections.OrderedDict()
for entity in entities:
key = (entity.entity_type, str(primary_key(entity).values()))
entities_by_identity[key] = ftrack_api_old.symbol.NOT_SET
for operation in session.recorded_operations:
if (
isinstance(
operation,
(
ftrack_api_old.operation.CreateEntityOperation,
ftrack_api_old.operation.UpdateEntityOperation,
ftrack_api_old.operation.DeleteEntityOperation
)
)
):
key = (operation.entity_type, str(operation.entity_key.values()))
if key not in entities_by_identity:
continue
value = _state(operation, entities_by_identity[key])
entities_by_identity[key] = value
return entities_by_identity.values()
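
# Illustrative usage sketch, not part of the vendored module. Assumes a
# configured Session; shows primary key extraction and the pending state of
# an entity created locally but not yet committed. Entity values are
# placeholders.
#
# import ftrack_api_old
# import ftrack_api_old.inspection
# import ftrack_api_old.symbol
#
# session = ftrack_api_old.Session()
# user = session.query('User').first()
# print(ftrack_api_old.inspection.primary_key(user))  # OrderedDict of id
#
# sequence = session.create('Sequence', {'name': 'sq010'})
# state = ftrack_api_old.inspection.state(sequence)
# print(state is ftrack_api_old.symbol.CREATED)  # True until committed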

26
pype/vendor/ftrack_api_old/logging.py vendored Normal file
View file

@@ -0,0 +1,26 @@
# :coding: utf-8
# :copyright: Copyright (c) 2016 ftrack
class LazyLogMessage(object):
'''A log message that can be evaluated lazily for improved performance.
Example::
# Formatting of string will not occur unless debug logging enabled.
logger.debug(LazyLogMessage(
'Hello {0}', 'world'
))
'''
def __init__(self, message, *args, **kwargs):
'''Initialise with *message* format string and arguments.'''
self.message = message
self.args = args
self.kwargs = kwargs
def __str__(self):
'''Return string representation.'''
return self.message.format(*self.args, **self.kwargs)

115
pype/vendor/ftrack_api_old/operation.py vendored Normal file
View file

@@ -0,0 +1,115 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import copy
class Operations(object):
'''Stack of operations.'''
def __init__(self):
'''Initialise stack.'''
self._stack = []
super(Operations, self).__init__()
def clear(self):
'''Clear all operations.'''
del self._stack[:]
def push(self, operation):
'''Push *operation* onto stack.'''
self._stack.append(operation)
def pop(self):
'''Pop and return most recent operation from stack.'''
return self._stack.pop()
def __len__(self):
'''Return count of operations.'''
return len(self._stack)
def __iter__(self):
'''Return iterator over operations.'''
return iter(self._stack)
class Operation(object):
'''Represent an operation.'''
class CreateEntityOperation(Operation):
'''Represent create entity operation.'''
def __init__(self, entity_type, entity_key, entity_data):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api_old.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api_old.inspection.primary_key`.
*entity_data* should be a mapping of the initial data to populate the
entity with when creating.
.. note::
Shallow copies will be made of each value in *entity_data*.
'''
super(CreateEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
self.entity_data = {}
for key, value in entity_data.items():
self.entity_data[key] = copy.copy(value)
class UpdateEntityOperation(Operation):
'''Represent update entity operation.'''
def __init__(
self, entity_type, entity_key, attribute_name, old_value, new_value
):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api_old.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api_old.inspection.primary_key`.
*attribute_name* should be the string name of the attribute being
modified and *old_value* and *new_value* should reflect the change in
value.
.. note::
Shallow copies will be made of both *old_value* and *new_value*.
'''
super(UpdateEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
self.attribute_name = attribute_name
self.old_value = copy.copy(old_value)
self.new_value = copy.copy(new_value)
class DeleteEntityOperation(Operation):
'''Represent delete entity operation.'''
def __init__(self, entity_type, entity_key):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api_old.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api_old.inspection.primary_key`.
'''
super(DeleteEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
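
# Illustrative sketch, not part of the vendored module: recording operations
# on a stack, much as the session records them internally. Entity keys and
# values are placeholders.
#
# from ftrack_api_old.operation import (
#     CreateEntityOperation, Operations, UpdateEntityOperation
# )
#
# operations = Operations()
# operations.push(
#     CreateEntityOperation('Task', {'id': 'abc123'}, {'name': 'compositing'})
# )
# operations.push(
#     UpdateEntityOperation(
#         'Task', {'id': 'abc123'}, 'name', 'compositing', 'comp'
#     )
# )
# print(len(operations))             # 2
# print(operations.pop().new_value)  # 'comp'
# operations.clear()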

121
pype/vendor/ftrack_api_old/plugin.py vendored Normal file
View file

@@ -0,0 +1,121 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import logging
import os
import uuid
import imp
import inspect
def discover(paths, positional_arguments=None, keyword_arguments=None):
'''Find and load plugins in search *paths*.
Each discovered module should implement a register function that accepts
*positional_arguments* and *keyword_arguments* as \*args and \*\*kwargs
respectively.
If a register function does not accept variable arguments, then attempt to
only pass accepted arguments to the function by inspecting its signature.
'''
logger = logging.getLogger(__name__ + '.discover')
if positional_arguments is None:
positional_arguments = []
if keyword_arguments is None:
keyword_arguments = {}
for path in paths:
# Ignore empty paths that could resolve to current directory.
path = path.strip()
if not path:
continue
for base, directories, filenames in os.walk(path):
for filename in filenames:
name, extension = os.path.splitext(filename)
if extension != '.py':
continue
module_path = os.path.join(base, filename)
unique_name = uuid.uuid4().hex
try:
module = imp.load_source(unique_name, module_path)
except Exception as error:
logger.warning(
'Failed to load plugin from "{0}": {1}'
.format(module_path, error)
)
continue
try:
module.register
except AttributeError:
logger.warning(
'Failed to load plugin that did not define a '
'"register" function at the module level: {0}'
.format(module_path)
)
else:
# Attempt to only pass arguments that are accepted by the
# register function.
specification = inspect.getargspec(module.register)
selected_positional_arguments = positional_arguments
selected_keyword_arguments = keyword_arguments
if (
not specification.varargs and
len(positional_arguments) > len(specification.args)
):
logger.warning(
'Culling passed arguments to match register '
'function signature.'
)
selected_positional_arguments = positional_arguments[
:len(specification.args)
]
selected_keyword_arguments = {}
elif not specification.keywords:
# Remove arguments that have been passed as positionals.
remainder = specification.args[
len(positional_arguments):
]
# Determine remaining available keyword arguments.
defined_keyword_arguments = []
if specification.defaults:
defined_keyword_arguments = specification.args[
-len(specification.defaults):
]
remaining_keyword_arguments = set([
keyword_argument for keyword_argument
in defined_keyword_arguments
if keyword_argument in remainder
])
if not set(keyword_arguments.keys()).issubset(
remaining_keyword_arguments
):
logger.warning(
'Culling passed arguments to match register '
'function signature.'
)
selected_keyword_arguments = {
key: value
for key, value in keyword_arguments.items()
if key in remaining_keyword_arguments
}
module.register(
*selected_positional_arguments,
**selected_keyword_arguments
)
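
# Illustrative sketch, not part of the vendored module: a minimal plugin and
# how discover() would load it. The directory path and argument values are
# placeholders; the plugin module itself would live on disk, e.g.:
#
#     # /studio/plugins/example_plugin.py
#     def register(session, priority=100):
#         print('Registered with priority {0}'.format(priority))
#
# import ftrack_api_old.plugin
#
# ftrack_api_old.plugin.discover(
#     ['/studio/plugins'],
#     positional_arguments=['my-session-object'],
#     keyword_arguments={'priority': 50}
# )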

202
pype/vendor/ftrack_api_old/query.py vendored Normal file
View file

@@ -0,0 +1,202 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import re
import collections
import ftrack_api_old.exception
class QueryResult(collections.Sequence):
'''Results from a query.'''
OFFSET_EXPRESSION = re.compile('(?P<offset>offset (?P<value>\d+))')
LIMIT_EXPRESSION = re.compile('(?P<limit>limit (?P<value>\d+))')
def __init__(self, session, expression, page_size=500):
'''Initialise result set.
*session* should be an instance of :class:`ftrack_api_old.session.Session`
that will be used for executing the query *expression*.
*page_size* should be an integer specifying the maximum number of
records to fetch in one request allowing the results to be fetched
incrementally in a transparent manner for optimal performance. Any
offset or limit specified in *expression* are honoured for final result
set, but intermediate queries may be issued with different offsets and
limits in order to fetch pages. When an embedded limit is smaller than
the given *page_size* it will be used instead and no paging will take
place.
.. warning::
Setting *page_size* to a very large amount may negatively impact
performance of not only the caller, but the server in general.
'''
super(QueryResult, self).__init__()
self._session = session
self._results = []
(
self._expression,
self._offset,
self._limit
) = self._extract_offset_and_limit(expression)
self._page_size = page_size
if self._limit is not None and self._limit < self._page_size:
# Optimise case where embedded limit is less than fetching a
# single page.
self._page_size = self._limit
self._next_offset = self._offset
if self._next_offset is None:
# Initialise with zero offset.
self._next_offset = 0
def _extract_offset_and_limit(self, expression):
'''Process *expression* extracting offset and limit.
Return (expression, offset, limit).
'''
offset = None
match = self.OFFSET_EXPRESSION.search(expression)
if match:
offset = int(match.group('value'))
expression = (
expression[:match.start('offset')] +
expression[match.end('offset'):]
)
limit = None
match = self.LIMIT_EXPRESSION.search(expression)
if match:
limit = int(match.group('value'))
expression = (
expression[:match.start('limit')] +
expression[match.end('limit'):]
)
return expression.strip(), offset, limit
def __getitem__(self, index):
'''Return value at *index*.'''
while self._can_fetch_more() and index >= len(self._results):
self._fetch_more()
return self._results[index]
def __len__(self):
'''Return number of items.'''
while self._can_fetch_more():
self._fetch_more()
return len(self._results)
def _can_fetch_more(self):
'''Return whether more results are available to fetch.'''
return self._next_offset is not None
def _fetch_more(self):
'''Fetch next page of results if available.'''
if not self._can_fetch_more():
return
expression = '{0} offset {1} limit {2}'.format(
self._expression, self._next_offset, self._page_size
)
records, metadata = self._session._query(expression)
self._results.extend(records)
if self._limit is not None and (len(self._results) >= self._limit):
# Original limit reached.
self._next_offset = None
del self._results[self._limit:]
else:
# Retrieve next page offset from returned metadata.
self._next_offset = metadata.get('next', {}).get('offset', None)
def all(self):
'''Fetch and return all data.'''
return list(self)
def one(self):
'''Return exactly one single result from query by applying a limit.
Raise :exc:`ValueError` if an existing limit is already present in the
expression.
Raise :exc:`ValueError` if an existing offset is already present in the
expression as offset is inappropriate when expecting a single item.
Raise :exc:`~ftrack_api_old.exception.MultipleResultsFoundError` if more
than one result was available or
:exc:`~ftrack_api_old.exception.NoResultFoundError` if no results were
available.
.. note::
Both errors subclass
:exc:`~ftrack_api_old.exception.IncorrectResultError` if you want to
catch only one error type.
'''
expression = self._expression
if self._limit is not None:
raise ValueError(
'Expression already contains a limit clause.'
)
if self._offset is not None:
raise ValueError(
'Expression contains an offset clause which does not make '
'sense when selecting a single item.'
)
# Apply custom limit as optimisation. A limit of 2 is used rather than
# 1 so that it is possible to test for multiple matching entries
# case.
expression += ' limit 2'
results, metadata = self._session._query(expression)
if not results:
raise ftrack_api_old.exception.NoResultFoundError()
if len(results) != 1:
raise ftrack_api_old.exception.MultipleResultsFoundError()
return results[0]
def first(self):
'''Return first matching result from query by applying a limit.
Raise :exc:`ValueError` if an existing limit is already present in the
expression.
If no matching result available return None.
'''
expression = self._expression
if self._limit is not None:
raise ValueError(
'Expression already contains a limit clause.'
)
# Apply custom offset if present.
if self._offset is not None:
expression += ' offset {0}'.format(self._offset)
# Apply custom limit as optimisation.
expression += ' limit 1'
results, metadata = self._session._query(expression)
if results:
return results[0]
return None
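
As a rough usage sketch (assuming the vendored package exposes Session the same way upstream ftrack_api does, with server address, API key and user read from the environment; the project name is made up), the paging above stays invisible to callers:

import ftrack_api_old

session = ftrack_api_old.Session()

# Iterating (or calling len()) transparently fetches page_size records at a time.
for task in session.query('Task where project.name is "my_project"'):
    print(task['name'])

# one() and first() apply their own limit instead of paging.
project = session.query('Project where name is "my_project"').one()
first_task = session.query('Task where project.name is "my_project"').first()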

View file

@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@@ -0,0 +1,50 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
class ResourceIdentifierTransformer(object):
'''Transform resource identifiers.
Provide ability to modify resource identifier before it is stored centrally
(:meth:`encode`), or after it has been retrieved, but before it is used
locally (:meth:`decode`).
For example, you might want to decompose paths into a set of key, value
pairs to store centrally and then compose a path from those values when
reading back.
.. note::
This is separate from any transformations an
:class:`ftrack_api_old.accessor.base.Accessor` may perform and is targeted
towards common transformations.
'''
def __init__(self, session):
'''Initialise resource identifier transformer.
*session* should be the :class:`ftrack_api_old.session.Session` instance
to use for communication with the server.
'''
self.session = session
super(ResourceIdentifierTransformer, self).__init__()
def encode(self, resource_identifier, context=None):
'''Return encoded *resource_identifier* for storing centrally.
A mapping of *context* values may be supplied to guide the
transformation.
'''
return resource_identifier
def decode(self, resource_identifier, context=None):
'''Return decoded *resource_identifier* for use locally.
A mapping of *context* values may be supplied to guide the
transformation.
'''
return resource_identifier
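
A hypothetical subclass (not part of this diff) might decompose a path into components for central storage and rebuild it locally, for example:

import json


class JSONPathTransformer(ResourceIdentifierTransformer):
    '''Illustrative transformer storing path components as JSON.'''

    def encode(self, resource_identifier, context=None):
        '''Store *resource_identifier* as a JSON list of path components.'''
        return json.dumps({'parts': resource_identifier.split('/')})

    def decode(self, resource_identifier, context=None):
        '''Rebuild a local path from the stored JSON components.'''
        return '/'.join(json.loads(resource_identifier)['parts'])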

2397 pype/vendor/ftrack_api_old/session.py vendored Normal file

File diff suppressed because it is too large.

View file

@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@@ -0,0 +1,38 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from abc import ABCMeta, abstractmethod
class Structure(object):
'''Structure plugin interface.
A structure plugin should compute appropriate paths for data.
'''
__metaclass__ = ABCMeta
def __init__(self, prefix=''):
'''Initialise structure.'''
self.prefix = prefix
self.path_separator = '/'
super(Structure, self).__init__()
@abstractmethod
def get_resource_identifier(self, entity, context=None):
'''Return a resource identifier for supplied *entity*.
*context* can be a mapping that supplies additional information.
'''
def _get_sequence_expression(self, sequence):
'''Return a sequence expression for *sequence* component.'''
padding = sequence['padding']
if padding:
expression = '%0{0}d'.format(padding)
else:
expression = '%d'
return expression
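
A concrete structure plugin only needs to implement get_resource_identifier (note that _get_sequence_expression above would yield '%04d' for a padding of 4). A minimal, hypothetical flat layout, not one of the library's own structure classes, could look like:

class FlatIdStructure(Structure):
    '''Illustrative structure keying resources purely by entity id.'''

    def get_resource_identifier(self, entity, context=None):
        '''Return "<prefix>/<entity id>" joined with the path separator.'''
        parts = [entity['id']]
        if self.prefix:
            parts.insert(0, self.prefix)
        return self.path_separator.join(parts)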

Some files were not shown because too many files have changed in this diff.