added ftrack_api without _old suffix

iLLiCiTiT 2020-12-22 11:58:22 +01:00
parent 0e920a3f66
commit cc8eb20f0e
46 changed files with 11691 additions and 0 deletions


@@ -0,0 +1,32 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from ._version import __version__
from .session import Session
def mixin(instance, mixin_class, name=None):
'''Mixin *mixin_class* to *instance*.
*name* can be used to specify the new class name. If not specified, one
will be generated.
'''
if name is None:
name = '{0}{1}'.format(
instance.__class__.__name__, mixin_class.__name__
)
# Check that the mixin class is not already present in the mro in order
# to avoid failing to create a consistent method resolution order.
if mixin_class in instance.__class__.mro():
return
instance.__class__ = type(
name,
(
mixin_class,
instance.__class__
),
{}
)
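
For illustration, a minimal sketch of how mixin() rebinds an instance's class at runtime. Record and AuditMixin are hypothetical names, not part of ftrack_api:

class Record(object):
    pass

class AuditMixin(object):
    def audit(self):
        return 'audited {0}'.format(self.__class__.__name__)

record = Record()
mixin(record, AuditMixin)
print(type(record).__name__)   # RecordAuditMixin
print(record.audit())          # audited RecordAuditMixin

# A second call is a no-op because AuditMixin is already in the mro.
mixin(record, AuditMixin)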


@@ -0,0 +1,659 @@
# :coding: utf-8
# :copyright: Copyright (c) 2016 ftrack
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
import logging
import json
import sys
import os
import ftrack_api
import ftrack_api.structure.standard as _standard
from ftrack_api.logging import LazyLogMessage as L
scenario_name = 'ftrack.centralized-storage'
class ConfigureCentralizedStorageScenario(object):
'''Configure a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
@property
def storage_scenario(self):
'''Return storage scenario setting.'''
return self.session.query(
'select value from Setting '
'where name is "storage_scenario" and group is "STORAGE"'
).one()
@property
def existing_centralized_storage_configuration(self):
'''Return existing centralized storage configuration.'''
storage_scenario = self.storage_scenario
try:
configuration = json.loads(storage_scenario['value'])
except (ValueError, TypeError):
return None
if not isinstance(configuration, dict):
return None
if configuration.get('scenario') != scenario_name:
return None
return configuration.get('data', {})
def _get_confirmation_text(self, configuration):
'''Return confirmation text from *configuration*.'''
configure_location = configuration.get('configure_location')
select_location = configuration.get('select_location')
select_mount_point = configuration.get('select_mount_point')
if configure_location:
location_text = str(
'A new location will be created:\n\n'
'* Label: {location_label}\n'
'* Name: {location_name}\n'
'* Description: {location_description}\n'
).format(**configure_location)
else:
location = self.session.get(
'Location', select_location['location_id']
)
location_text = (
u'You have chosen to use an existing location: {0}'.format(
location['label']
)
)
mount_points_text = str(
'* Linux: {linux}\n'
'* OS X: {osx}\n'
'* Windows: {windows}\n\n'
).format(
linux=select_mount_point.get('linux_mount_point') or '*Not set*',
osx=select_mount_point.get('osx_mount_point') or '*Not set*',
windows=select_mount_point.get('windows_mount_point') or '*Not set*'
)
mount_points_not_set = []
if not select_mount_point.get('linux_mount_point'):
mount_points_not_set.append('Linux')
if not select_mount_point.get('osx_mount_point'):
mount_points_not_set.append('OS X')
if not select_mount_point.get('windows_mount_point'):
mount_points_not_set.append('Windows')
if mount_points_not_set:
mount_points_text += str(
'Please be aware that this location will not work on '
'{missing} because the mount points are not set up.'
).format(
missing=' and '.join(mount_points_not_set)
)
text = str(
'#Confirm storage setup#\n\n'
'Almost there! Please take a moment to verify the settings you '
'are about to save. You can always come back later and update the '
'configuration.\n'
'##Location##\n\n'
'{location}\n'
'##Mount points##\n\n'
'{mount_points}'
).format(
location=location_text,
mount_points=mount_points_text
)
return text
def configure_scenario(self, event):
'''Configure scenario based on *event* and return form items.'''
steps = (
'select_scenario',
'select_location',
'configure_location',
'select_structure',
'select_mount_point',
'confirm_summary',
'save_configuration'
)
warning_message = ''
values = event['data'].get('values', {})
# Calculate previous step and the next.
previous_step = values.get('step', 'select_scenario')
next_step = steps[steps.index(previous_step) + 1]
state = 'configuring'
self.logger.info(L(
u'Configuring scenario, previous step: {0}, next step: {1}. '
u'Values {2!r}.',
previous_step, next_step, values
))
if 'configuration' in values:
configuration = values.pop('configuration')
else:
configuration = {}
if values:
# Update configuration with values from the previous step.
configuration[previous_step] = values
if previous_step == 'select_location':
values = configuration['select_location']
if values.get('location_id') != 'create_new_location':
location_exists = self.session.query(
'Location where id is "{0}"'.format(
values.get('location_id')
)
).first()
if not location_exists:
next_step = 'select_location'
warning_message = (
'**The selected location does not exist. Please choose '
'one from the dropdown or create a new one.**'
)
if next_step == 'select_location':
try:
location_id = (
self.existing_centralized_storage_configuration['location_id']
)
except (KeyError, TypeError):
location_id = None
options = [{
'label': 'Create new location',
'value': 'create_new_location'
}]
for location in self.session.query(
'select name, label, description from Location'
):
if location['name'] not in (
'ftrack.origin', 'ftrack.unmanaged', 'ftrack.connect',
'ftrack.server', 'ftrack.review'
):
options.append({
'label': u'{label} ({name})'.format(
label=location['label'], name=location['name']
),
'description': location['description'],
'value': location['id']
})
warning = ''
if location_id is not None:
# If there is already a location configured we must make the
# user aware that changing the location may be problematic.
warning = (
'\n\n**Be careful if you switch to another location '
'for an existing storage scenario. Components that have '
'already been published to the previous location will be '
'made unavailable for common use.**'
)
default_value = location_id
elif location_id is None and len(options) == 1:
# No location configured and no existing locations to use.
default_value = 'create_new_location'
else:
# There are existing locations to choose from but none of them
# are currently active in the centralized storage scenario.
default_value = None
items = [{
'type': 'label',
'value': (
'#Select location#\n'
'Choose an already existing location or create a new one '
'to represent your centralized storage. {0}'.format(
warning
)
)
}, {
'type': 'enumerator',
'label': 'Location',
'name': 'location_id',
'value': default_value,
'data': options
}]
default_location_name = 'studio.central-storage-location'
default_location_label = 'Studio location'
default_location_description = (
'The studio central location where all components are '
'stored.'
)
if previous_step == 'configure_location':
configure_location = configuration.get(
'configure_location'
)
if configure_location:
try:
existing_location = self.session.query(
u'Location where name is "{0}"'.format(
configure_location.get('location_name')
)
).first()
except UnicodeEncodeError:
next_step = 'configure_location'
warning_message += (
'**The location name contains non-ascii characters. '
'Please change the name and try again.**'
)
values = configuration['select_location']
else:
if existing_location:
next_step = 'configure_location'
warning_message += (
u'**There is already a location named {0}. '
u'Please change the name and try again.**'.format(
configure_location.get('location_name')
)
)
values = configuration['select_location']
if (
not configure_location.get('location_name') or
not configure_location.get('location_label') or
not configure_location.get('location_description')
):
next_step = 'configure_location'
warning_message += (
'**Location name, label and description cannot '
'be empty.**'
)
values = configuration['select_location']
if next_step == 'configure_location':
# Populate form with previous configuration.
default_location_label = configure_location['location_label']
default_location_name = configure_location['location_name']
default_location_description = (
configure_location['location_description']
)
if next_step == 'configure_location':
if values.get('location_id') == 'create_new_location':
# Add options to create a new location.
items = [{
'type': 'label',
'value': (
'#Create location#\n'
'Here you will create a new location to be used '
'with your new Storage scenario. For your '
'convenience we have already filled in some default '
'values. If this is the first time you are configuring '
'a storage scenario in ftrack we recommend that you '
'stick with these settings.'
)
}, {
'label': 'Label',
'name': 'location_label',
'value': default_location_label,
'type': 'text'
}, {
'label': 'Name',
'name': 'location_name',
'value': default_location_name,
'type': 'text'
}, {
'label': 'Description',
'name': 'location_description',
'value': default_location_description,
'type': 'text'
}]
else:
# The user selected an existing location. Move on to next
# step.
next_step = 'select_mount_point'
if next_step == 'select_structure':
# There is only one structure to choose from, go to next step.
next_step = 'select_mount_point'
# items = [
# {
# 'type': 'label',
# 'value': (
# '#Select structure#\n'
# 'Select which structure to use with your location. '
# 'The structure is used to generate the filesystem '
# 'path for components that are added to this location.'
# )
# },
# {
# 'type': 'enumerator',
# 'label': 'Structure',
# 'name': 'structure_id',
# 'value': 'standard',
# 'data': [{
# 'label': 'Standard',
# 'value': 'standard',
# 'description': (
# 'The Standard structure uses the names in your '
# 'project structure to determine the path.'
# )
# }]
# }
# ]
if next_step == 'select_mount_point':
try:
mount_points = (
self.existing_centralized_storage_configuration['accessor']['mount_points']
)
except (KeyError, TypeError):
mount_points = dict()
items = [
{
'value': (
'#Mount points#\n'
'Set mount points for your centralized storage '
'location. For the location to work as expected each '
'platform that you intend to use must have the '
'corresponding mount point set and the storage must '
'be accessible. If not set correctly files will not be '
'saved or read.'
),
'type': 'label'
}, {
'type': 'text',
'label': 'Linux',
'name': 'linux_mount_point',
'empty_text': 'E.g. /usr/mnt/MyStorage ...',
'value': mount_points.get('linux', '')
}, {
'type': 'text',
'label': 'OS X',
'name': 'osx_mount_point',
'empty_text': 'E.g. /Volumes/MyStorage ...',
'value': mount_points.get('osx', '')
}, {
'type': 'text',
'label': 'Windows',
'name': 'windows_mount_point',
'empty_text': 'E.g. \\\\MyStorage ...',
'value': mount_points.get('windows', '')
}
]
if next_step == 'confirm_summary':
items = [{
'type': 'label',
'value': self._get_confirmation_text(configuration)
}]
state = 'confirm'
if next_step == 'save_configuration':
mount_points = configuration['select_mount_point']
select_location = configuration['select_location']
if select_location['location_id'] == 'create_new_location':
configure_location = configuration['configure_location']
location = self.session.create(
'Location',
{
'name': configure_location['location_name'],
'label': configure_location['location_label'],
'description': (
configure_location['location_description']
)
}
)
else:
location = self.session.query(
'Location where id is "{0}"'.format(
select_location['location_id']
)
).one()
setting_value = json.dumps({
'scenario': scenario_name,
'data': {
'location_id': location['id'],
'location_name': location['name'],
'accessor': {
'mount_points': {
'linux': mount_points['linux_mount_point'],
'osx': mount_points['osx_mount_point'],
'windows': mount_points['windows_mount_point']
}
}
}
})
self.storage_scenario['value'] = setting_value
self.session.commit()
# Broadcast an event that storage scenario has been configured.
event = ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.configure-done'
)
self.session.event_hub.publish(event)
items = [{
'type': 'label',
'value': (
'#Done!#\n'
'Your storage scenario is now configured and ready '
'to use. **Note that you may have to restart Connect and '
'other applications to start using it.**'
)
}]
state = 'done'
if warning_message:
items.insert(0, {
'type': 'label',
'value': warning_message
})
items.append({
'type': 'hidden',
'value': configuration,
'name': 'configuration'
})
items.append({
'type': 'hidden',
'value': next_step,
'name': 'step'
})
return {
'items': items,
'state': state
}
def discover_centralized_scenario(self, event):
'''Return action discover dictionary for *event*.'''
return {
'id': scenario_name,
'name': 'Centralized storage scenario',
'description': (
'(Recommended) centralized storage scenario where all files '
'are kept on a storage that is mounted and available to '
'everyone in the studio.'
)
}
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
#: TODO: Move these to a separate function.
session.event_hub.subscribe(
str(
'topic=ftrack.storage-scenario.discover '
'and source.user.username="{0}"'
).format(
session.api_user
),
self.discover_centralized_scenario
)
session.event_hub.subscribe(
str(
'topic=ftrack.storage-scenario.configure '
'and data.scenario_id="{0}" '
'and source.user.username="{1}"'
).format(
scenario_name,
session.api_user
),
self.configure_scenario
)
class ActivateCentralizedStorageScenario(object):
'''Activate a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
def activate(self, event):
'''Activate scenario in *event*.'''
storage_scenario = event['data']['storage_scenario']
try:
location_data = storage_scenario['data']
location_name = location_data['location_name']
location_id = location_data['location_id']
mount_points = location_data['accessor']['mount_points']
except KeyError:
error_message = (
'Unable to read storage scenario data.'
)
self.logger.error(L(error_message))
raise ftrack_api.exception.LocationError(
'Unable to configure location based on scenario.'
)
else:
location = self.session.create(
'Location',
data=dict(
name=location_name,
id=location_id
),
reconstructing=True
)
if 'darwin' in sys.platform:
prefix = mount_points['osx']
elif 'linux' in sys.platform:
prefix = mount_points['linux']
elif 'win' in sys.platform:
prefix = mount_points['windows']
else:
raise ftrack_api.exception.LocationError(
(
'Unable to find accessor prefix for platform {0}.'
).format(sys.platform)
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(
prefix=prefix
)
location.structure = _standard.StandardStructure()
location.priority = 1
self.logger.info(L(
u'Storage scenario activated. Configured {0!r} from '
u'{1!r}',
location, storage_scenario
))
def _verify_startup(self, event):
'''Verify the storage scenario configuration.'''
storage_scenario = event['data']['storage_scenario']
location_data = storage_scenario['data']
mount_points = location_data['accessor']['mount_points']
prefix = None
if 'darwin' in sys.platform:
prefix = mount_points['osx']
elif 'linux' in sys.platform:
prefix = mount_points['linux']
elif 'win' in sys.platform:
prefix = mount_points['windows']
if not prefix:
return (
u'The storage scenario has not been configured for your '
u'operating system. ftrack may not be able to '
u'store and track files correctly.'
)
if not os.path.isdir(prefix):
return (
str(
'The path {0} does not exist. ftrack may not be able to '
'store and track files correctly. \n\nIf the storage is '
'newly setup you may want to create necessary folder '
'structures. If the storage is a network drive you should '
'make sure that it is mounted correctly.'
).format(prefix)
)
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
session.event_hub.subscribe(
(
'topic=ftrack.storage-scenario.activate '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self.activate
)
# Listen for the verify-startup event from ftrack connect so that we can
# respond with a message if something in this scenario is not working
# correctly and the user should be notified.
self.session.event_hub.subscribe(
(
'topic=ftrack.connect.verify-startup '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self._verify_startup
)
def register(session):
'''Register storage scenario.'''
scenario = ActivateCentralizedStorageScenario()
scenario.register(session)
def register_configuration(session):
'''Register storage scenario configuration.'''
scenario = ConfigureCentralizedStorageScenario()
scenario.register(session)
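
A minimal sketch of wiring these registration points to a session, assuming a reachable ftrack server with credentials available in the environment:

import ftrack_api

session = ftrack_api.Session()

# Client side: reconstruct and activate the configured location.
register(session)

# Configuration side: serve the step-by-step setup form.
register_configuration(session)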


@@ -0,0 +1,537 @@
# pragma: no cover
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
from __future__ import unicode_literals
from builtins import str
from builtins import zip
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
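
The drive-letter cases enumerated in the comments above can be checked directly; a quick sanity sketch using this module's join():

assert join('c:', '/a') == 'c:/a'    # case 1
assert join('c:/', '/a') == 'c:/a'   # case 2
assert join('c:/a', '/b') == '/b'    # case 3
assert join('c:', 'd:/') == 'd:/'    # case 4
assert join('c:/', 'd:/') == 'd:/'   # case 5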
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
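
A brief exercise of the substitution rules listed above, using this module's expandvars():

import os

os.environ['PROJ'] = 'alpha'
assert expandvars('$PROJ/%PROJ%/${PROJ}') == 'alpha/alpha/alpha'
assert expandvars('$$ and $MISSING') == '$ and $MISSING'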
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, str) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
# The original unicode/bytes branches became identical after the
# builtins.str import, so a single branch suffices.
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
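
These helpers work the same on any platform since they only manipulate strings; a few quick checks, assuming the module is vendored as ftrack_api._python_ntpath (the name the disk accessor below imports):

import ftrack_api._python_ntpath as ntpath

assert ntpath.splitdrive('c:\\foo\\bar.txt') == ('c:', '\\foo\\bar.txt')
assert ntpath.splitunc('\\\\server\\mount\\dir') == ('\\\\server\\mount', '\\dir')
assert ntpath.normpath('A//B/./C/../D') == 'A\\B\\D'
assert ntpath.splitext('archive.tar.gz') == ('archive.tar', '.gz')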


@@ -0,0 +1,5 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
__version__ = '2.1.0'


@@ -0,0 +1,66 @@
"""
Yet another backport of WeakMethod for Python 2.7.
Changes include removing exception chaining and adding args to super() calls.
Copyright (c) 2001-2019 Python Software Foundation. All rights reserved.
Full license available in LICENSE.python.
"""
from weakref import ref
class WeakMethod(ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
a bound method, working around the lifetime problem of bound methods.
"""
__slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
def __new__(cls, meth, callback=None):
try:
obj = meth.__self__
func = meth.__func__
except AttributeError:
raise TypeError(
"argument should be a bound method, not {}".format(type(meth))
)
def _cb(arg):
# The self-weakref trick is needed to avoid creating a reference
# cycle.
self = self_wr()
if self._alive:
self._alive = False
if callback is not None:
callback(self)
self = ref.__new__(cls, obj, _cb)
self._func_ref = ref(func, _cb)
self._meth_type = type(meth)
self._alive = True
self_wr = ref(self)
return self
def __call__(self):
obj = super(WeakMethod, self).__call__()
func = self._func_ref()
if obj is None or func is None:
return None
return self._meth_type(func, obj)
def __eq__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is other
return ref.__eq__(self, other) and self._func_ref == other._func_ref
return NotImplemented
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return ref.__ne__(self, other) or self._func_ref != other._func_ref
return NotImplemented
__hash__ = ref.__hash__
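
A small demonstration of the lifetime behaviour this class provides; Greeter is a hypothetical example class:

import gc

class Greeter(object):
    def hello(self):
        return 'hello'

greeter = Greeter()
weak_hello = WeakMethod(greeter.hello)
assert weak_hello()() == 'hello'   # Dereference, then call the bound method.

del greeter
gc.collect()
assert weak_hello() is None        # The referent has been collected.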


@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack


@@ -0,0 +1,124 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
from builtins import object
import abc
import ftrack_api.exception
from future.utils import with_metaclass
class Accessor(with_metaclass(abc.ABCMeta, object)):
'''Provide data access to a location.
A location represents a specific storage, but access to that storage may
vary. For example, both local filesystem and FTP access may be possible for
the same storage. An accessor implements these different ways of accessing
the same data location.
As different accessors may access the same location, only part of a data
path that is commonly understood may be stored in the database. The format
of this path should be a contract between the accessors that require access
to the same location and is left as an implementation detail. As such, this
system provides no guarantee that two different accessors can provide access
to the same location, though this is a clear goal. The path stored centrally
is referred to as the **resource identifier** and should be used when
calling any of the accessor methods that accept a *resource_identifier*
argument.
'''
def __init__(self):
'''Initialise location accessor.'''
super(Accessor, self).__init__()
@abc.abstractmethod
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.
Each entry in the returned list should be a valid resource identifier.
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist or
:exc:`~ftrack_api.exception.AccessorResourceInvalidError` if
*resource_identifier* is not a container.
'''
@abc.abstractmethod
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
@abc.abstractmethod
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
@abc.abstractmethod
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
@abc.abstractmethod
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
@abc.abstractmethod
def open(self, resource_identifier, mode='rb'):
'''Return :class:`~ftrack_api.data.Data` for *resource_identifier*.'''
@abc.abstractmethod
def remove(self, resource_identifier):
'''Remove *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist.
'''
@abc.abstractmethod
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.
If *recursive* is True, also make any intermediate containers.
Should silently ignore existing containers and not recreate them.
'''
@abc.abstractmethod
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError`
if container of *resource_identifier* could not be determined.
'''
def remove_container(self, resource_identifier): # pragma: no cover
'''Remove container at *resource_identifier*.'''
return self.remove(resource_identifier)
def get_filesystem_path(self, resource_identifier): # pragma: no cover
'''Return filesystem path for *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
filesystem path could not be determined from *resource_identifier* or
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
retrieving filesystem paths is not supported by this accessor.
'''
raise ftrack_api.exception.AccessorUnsupportedOperationError(
'get_filesystem_path', resource_identifier=resource_identifier
)
def get_url(self, resource_identifier):
'''Return URL for *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
URL could not be determined from *resource_identifier* or
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by this accessor.
'''
raise ftrack_api.exception.AccessorUnsupportedOperationError(
'get_url', resource_identifier=resource_identifier
)
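
To make the contract concrete, a deliberately tiny in-memory accessor sketch. MemoryAccessor is hypothetical, and for brevity open() returns a plain BytesIO rather than a ftrack_api.data.Data wrapper:

import io

class MemoryAccessor(Accessor):
    '''Toy accessor keeping file contents in a dict.'''

    def __init__(self):
        super(MemoryAccessor, self).__init__()
        self._files = {}

    def list(self, resource_identifier):
        prefix = resource_identifier.rstrip('/') + '/'
        return [key for key in self._files if key.startswith(prefix)]

    def exists(self, resource_identifier):
        return resource_identifier in self._files

    def is_file(self, resource_identifier):
        return resource_identifier in self._files

    def is_container(self, resource_identifier):
        # Containers are purely notional in this sketch.
        return resource_identifier.endswith('/')

    def is_sequence(self, resource_identifier):
        return False

    def open(self, resource_identifier, mode='rb'):
        return io.BytesIO(self._files.get(resource_identifier, b''))

    def remove(self, resource_identifier):
        self._files.pop(resource_identifier)

    def make_container(self, resource_identifier, recursive=True):
        pass  # Nothing to create for an in-memory store.

    def get_container(self, resource_identifier):
        return resource_identifier.rstrip('/').rsplit('/', 1)[0] + '/'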


@@ -0,0 +1,251 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
import os
import sys
import errno
import contextlib
import ftrack_api._python_ntpath as ntpath
import ftrack_api.accessor.base
import ftrack_api.data
from ftrack_api.exception import (
AccessorFilesystemPathError,
AccessorUnsupportedOperationError,
AccessorResourceNotFoundError,
AccessorOperationFailedError,
AccessorPermissionDeniedError,
AccessorResourceInvalidError,
AccessorContainerNotEmptyError,
AccessorParentResourceNotFoundError
)
class DiskAccessor(ftrack_api.accessor.base.Accessor):
'''Provide disk access to a location.
Expect resource identifiers to refer to relative filesystem paths.
'''
def __init__(self, prefix, **kw):
'''Initialise location accessor.
*prefix* specifies the base folder for the disk based structure and
will be prepended to any path. It should be specified in the syntax of
the current OS.
'''
if prefix:
prefix = os.path.expanduser(os.path.expandvars(prefix))
prefix = os.path.abspath(prefix)
self.prefix = prefix
super(DiskAccessor, self).__init__(**kw)
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.
Each entry in the returned list should be a valid resource identifier.
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist or
:exc:`~ftrack_api.exception.AccessorResourceInvalidError` if
*resource_identifier* is not a container.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='list', resource_identifier=resource_identifier
):
listing = []
for entry in os.listdir(filesystem_path):
listing.append(os.path.join(resource_identifier, entry))
return listing
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.exists(filesystem_path)
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.isfile(filesystem_path)
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
return os.path.isdir(filesystem_path)
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
raise AccessorUnsupportedOperationError(operation='is_sequence')
def open(self, resource_identifier, mode='rb'):
'''Return :class:`~ftrack_api.data.Data` for *resource_identifier*.'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='open', resource_identifier=resource_identifier
):
data = ftrack_api.data.File(filesystem_path, mode)
return data
def remove(self, resource_identifier):
'''Remove *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
*resource_identifier* does not exist.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
if self.is_file(resource_identifier):
with error_handler(
operation='remove', resource_identifier=resource_identifier
):
os.remove(filesystem_path)
elif self.is_container(resource_identifier):
with error_handler(
operation='remove', resource_identifier=resource_identifier
):
os.rmdir(filesystem_path)
else:
raise AccessorResourceNotFoundError(
resource_identifier=resource_identifier
)
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.
If *recursive* is True, also make any intermediate containers.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
with error_handler(
operation='makeContainer', resource_identifier=resource_identifier
):
try:
if recursive:
os.makedirs(filesystem_path)
else:
try:
os.mkdir(filesystem_path)
except OSError as error:
if error.errno == errno.ENOENT:
raise AccessorParentResourceNotFoundError(
resource_identifier=resource_identifier
)
else:
raise
except OSError as error:
if error.errno != errno.EEXIST:
raise
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.
Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError` if
container of *resource_identifier* could not be determined.
'''
filesystem_path = self.get_filesystem_path(resource_identifier)
container = os.path.dirname(filesystem_path)
if self.prefix:
if not container.startswith(self.prefix):
raise AccessorParentResourceNotFoundError(
resource_identifier=resource_identifier,
message='Could not determine container for '
'{resource_identifier} as container falls outside '
'of configured prefix.'
)
# Convert container filesystem path into resource identifier.
container = container[len(self.prefix):]
if ntpath.isabs(container):
# Ensure that resulting path is relative by stripping any
# leftover prefixed slashes from string.
# E.g. If prefix was '/tmp' and path was '/tmp/foo/bar' the
# result will be 'foo/bar'.
container = container.lstrip('\\/')
return container
def get_filesystem_path(self, resource_identifier):
'''Return filesystem path for *resource_identifier*.
For example::
>>> accessor = DiskAccessor('/mountpoint')
>>> print(accessor.get_filesystem_path('test.txt'))
/mountpoint/test.txt
>>> print(accessor.get_filesystem_path('/mountpoint/test.txt'))
/mountpoint/test.txt
Raise :exc:`ftrack_api.exception.AccessorFilesystemPathError` if filesystem
path could not be determined from *resource_identifier*.
'''
filesystem_path = resource_identifier
if filesystem_path:
filesystem_path = os.path.normpath(filesystem_path)
if self.prefix:
if not os.path.isabs(filesystem_path):
filesystem_path = os.path.normpath(
os.path.join(self.prefix, filesystem_path)
)
if not filesystem_path.startswith(self.prefix):
raise AccessorFilesystemPathError(
resource_identifier=resource_identifier,
message='Could not determine access path for '
'resource_identifier outside of configured prefix: '
'{resource_identifier}.'
)
return filesystem_path
@contextlib.contextmanager
def error_handler(**kw):
'''Conform a raised OSError/IOError to the appropriate ftrack error.'''
try:
yield
except (OSError, IOError) as error:
(exception_type, exception_value, traceback) = sys.exc_info()
kw.setdefault('error', error)
error_code = getattr(error, 'errno')
if not error_code:
raise AccessorOperationFailedError(**kw)
if error_code == errno.ENOENT:
raise AccessorResourceNotFoundError(**kw)
elif error_code == errno.EPERM:
raise AccessorPermissionDeniedError(**kw)
elif error_code == errno.ENOTEMPTY:
raise AccessorContainerNotEmptyError(**kw)
elif error_code in (errno.ENOTDIR, errno.EISDIR, errno.EINVAL):
raise AccessorResourceInvalidError(**kw)
else:
raise AccessorOperationFailedError(**kw)
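
A typical round trip with the accessor, sketched under the assumption of a POSIX filesystem; resource identifiers stay relative to the configured prefix:

import tempfile

import ftrack_api.accessor.disk

prefix = tempfile.mkdtemp()
accessor = ftrack_api.accessor.disk.DiskAccessor(prefix=prefix)

accessor.make_container('components/review')
data = accessor.open('components/review/notes.txt', mode='wb')
data.write(b'approved')
data.close()

assert accessor.exists('components/review/notes.txt')
assert accessor.list('components/review') == ['components/review/notes.txt']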


@@ -0,0 +1,240 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import os
import hashlib
import base64
import json
import requests
from .base import Accessor
from ..data import String
import ftrack_api.exception
import ftrack_api.symbol
class ServerFile(String):
'''Representation of a server file.'''
def __init__(self, resource_identifier, session, mode='rb'):
'''Initialise file.'''
self.mode = mode
self.resource_identifier = resource_identifier
self._session = session
self._has_read = False
super(ServerFile, self).__init__()
def flush(self):
'''Flush all changes.'''
super(ServerFile, self).flush()
if self.mode == 'wb':
self._write()
def read(self, limit=None):
'''Read file.'''
if not self._has_read:
self._read()
self._has_read = True
return super(ServerFile, self).read(limit)
def _read(self):
'''Read all remote content from key into wrapped_file.'''
position = self.tell()
self.seek(0)
response = requests.get(
'{0}/component/get'.format(self._session.server_url),
params={
'id': self.resource_identifier,
'username': self._session.api_user,
'apiKey': self._session.api_key
},
stream=True
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
raise ftrack_api.exception.AccessorOperationFailedError(
'Failed to read data: {0}.'.format(error)
)
for block in response.iter_content(ftrack_api.symbol.CHUNK_SIZE):
self.wrapped_file.write(block)
self.flush()
self.seek(position)
def _write(self):
'''Write current data to remote key.'''
position = self.tell()
self.seek(0)
# Retrieve component from cache to construct a filename.
component = self._session.get('FileComponent', self.resource_identifier)
if not component:
raise ftrack_api.exception.AccessorOperationFailedError(
'Unable to retrieve component with id: {0}.'.format(
self.resource_identifier
)
)
# Construct a name from component name and file_type.
name = component['name']
if component['file_type']:
name = u'{0}.{1}'.format(
name,
component['file_type'].lstrip('.')
)
try:
metadata = self._session.get_upload_metadata(
component_id=self.resource_identifier,
file_name=name,
file_size=self._get_size(),
checksum=self._compute_checksum()
)
except Exception as error:
raise ftrack_api.exception.AccessorOperationFailedError(
'Failed to get upload metadata: {0}.'.format(error)
)
# Ensure at beginning of file before put.
self.seek(0)
# Put the file based on the metadata.
response = requests.put(
metadata['url'],
data=self.wrapped_file,
headers=metadata['headers']
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
raise ftrack_api.exception.AccessorOperationFailedError(
'Failed to put file to server: {0}.'.format(error)
)
self.seek(position)
def _get_size(self):
'''Return size of file in bytes.'''
position = self.tell()
self.seek(0, os.SEEK_END)
length = self.tell()
self.seek(position)
return length
def _compute_checksum(self):
'''Return checksum for file.'''
fp = self.wrapped_file
buf_size = ftrack_api.symbol.CHUNK_SIZE
hash_obj = hashlib.md5()
spos = fp.tell()
s = fp.read(buf_size)
while s:
hash_obj.update(s)
s = fp.read(buf_size)
base64_digest = base64.encodebytes(hash_obj.digest()).decode('utf-8')
if base64_digest[-1] == '\n':
base64_digest = base64_digest[0:-1]
fp.seek(spos)
return base64_digest
class _ServerAccessor(Accessor):
'''Provide server location access.'''
def __init__(self, session, **kw):
'''Initialise location accessor.'''
super(_ServerAccessor, self).__init__(**kw)
self._session = session
def open(self, resource_identifier, mode='rb'):
'''Return :py:class:`~ftrack_api.data.Data` for *resource_identifier*.'''
return ServerFile(resource_identifier, session=self._session, mode=mode)
def remove(self, resource_identifier):
'''Remove *resource_identifier*.'''
response = requests.get(
'{0}/component/remove'.format(self._session.server_url),
params={
'id': resource_identifier,
'username': self._session.api_user,
'apiKey': self._session.api_key
}
)
if response.status_code != 200:
raise ftrack_api.exception.AccessorOperationFailedError(
'Failed to remove file.'
)
def get_container(self, resource_identifier):
'''Return resource_identifier of container for *resource_identifier*.'''
return None
def make_container(self, resource_identifier, recursive=True):
'''Make a container at *resource_identifier*.'''
def list(self, resource_identifier):
'''Return list of entries in *resource_identifier* container.'''
raise NotImplementedError()
def exists(self, resource_identifier):
'''Return if *resource_identifier* is valid and exists in location.'''
return False
def is_file(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file.'''
raise NotImplementedError()
def is_container(self, resource_identifier):
'''Return whether *resource_identifier* refers to a container.'''
raise NotImplementedError()
def is_sequence(self, resource_identifier):
'''Return whether *resource_identifier* refers to a file sequence.'''
raise NotImplementedError()
def get_url(self, resource_identifier):
'''Return url for *resource_identifier*.'''
url_string = (
u'{url}/component/get?id={id}&username={username}'
u'&apiKey={apiKey}'
)
return url_string.format(
url=self._session.server_url,
id=resource_identifier,
username=self._session.api_user,
apiKey=self._session.api_key
)
def get_thumbnail_url(self, resource_identifier, size=None):
'''Return thumbnail url for *resource_identifier*.
Optionally, specify *size* to constrain the downscaled image to size
x size pixels.
'''
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self._session.server_url,
id=resource_identifier,
username=self._session.api_user,
apiKey=self._session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
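
The value produced by _compute_checksum above is a base64-encoded binary MD5 digest (the Content-MD5 style many upload endpoints expect), not the usual hex digest. A standalone equivalent:

import base64
import hashlib

payload = b'example bytes'
digest = hashlib.md5(payload).digest()
# A 16-byte MD5 digest encodes to a 24-character base64 string ending '=='.
checksum = base64.encodebytes(digest).decode('utf-8').rstrip('\n')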


@@ -0,0 +1,708 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
from builtins import object
import collections
import copy
import logging
import functools
import ftrack_api.symbol
import ftrack_api.exception
import ftrack_api.collection
import ftrack_api.inspection
import ftrack_api.operation
logger = logging.getLogger(
__name__
)
def merge_references(function):
'''Decorator to handle merging of references / collections.'''
@functools.wraps(function)
def get_value(attribute, entity):
'''Merge the attribute with the local cache.'''
if attribute.name not in entity._inflated:
# Only merge on first access to avoid
# inflating them multiple times.
logger.debug(
'Merging potential new data into attached '
'entity for attribute {0}.'.format(
attribute.name
)
)
# Local attributes.
local_value = attribute.get_local_value(entity)
if isinstance(
local_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
logger.debug(
'Merging local value for attribute {0}.'.format(attribute)
)
merged_local_value = entity.session._merge(
local_value, merged=dict()
)
if merged_local_value is not local_value:
with entity.session.operation_recording(False):
attribute.set_local_value(entity, merged_local_value)
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
merged_remote_value = entity.session._merge(
remote_value, merged=dict()
)
if merged_remote_value is not remote_value:
attribute.set_remote_value(entity, merged_remote_value)
entity._inflated.add(
attribute.name
)
return function(
attribute, entity
)
return get_value
class Attributes(object):
'''Collection of properties accessible by name.'''
def __init__(self, attributes=None):
super(Attributes, self).__init__()
self._data = dict()
if attributes is not None:
for attribute in attributes:
self.add(attribute)
def add(self, attribute):
'''Add *attribute*.'''
existing = self._data.get(attribute.name, None)
if existing:
raise ftrack_api.exception.NotUniqueError(
'Attribute with name {0} already added as {1}'
.format(attribute.name, existing)
)
self._data[attribute.name] = attribute
def remove(self, attribute):
'''Remove attribute.'''
self._data.pop(attribute.name)
def get(self, name):
'''Return attribute by *name*.
If no attribute matches *name* then return None.
'''
return self._data.get(name, None)
def keys(self):
'''Return list of attribute names.'''
return list(self._data.keys())
def __contains__(self, item):
'''Return whether *item* present.'''
if not isinstance(item, Attribute):
return False
return item.name in self._data
def __iter__(self):
'''Return iterator over attributes.'''
return iter(self._data.values())
def __len__(self):
'''Return count of attributes.'''
return len(self._data)
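
In isolation the container behaves like a small keyed set; a brief sketch using the Attribute class defined just below:

attributes = Attributes([Attribute('name'), Attribute('status')])

assert sorted(attributes.keys()) == ['name', 'status']
assert attributes.get('name').name == 'name'
assert attributes.get('missing') is None

try:
    attributes.add(Attribute('name'))
except ftrack_api.exception.NotUniqueError:
    pass  # Duplicate names are rejected.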
class Attribute(object):
'''A name and value pair persisted remotely.'''
def __init__(
self, name, default_value=ftrack_api.symbol.NOT_SET, mutable=True,
computed=False
):
'''Initialise attribute with *name*.
*default_value* represents the default value for the attribute. It may
be a callable. It is not used within the attribute when providing
values, but instead exists for other parts of the system to reference.
If *mutable* is set to False then the local value of the attribute on an
entity can only be set when both the existing local and remote values
are :attr:`ftrack_api.symbol.NOT_SET`. The exception to this is when the
target value is also :attr:`ftrack_api.symbol.NOT_SET`.
If *computed* is set to True the value is a remote side computed value
and should not be long-term cached.
'''
super(Attribute, self).__init__()
self._name = name
self._mutable = mutable
self._computed = computed
self.default_value = default_value
self._local_key = 'local'
self._remote_key = 'remote'
def __repr__(self):
'''Return representation of entity.'''
return '<{0}.{1}({2}) object at {3}>'.format(
self.__module__,
self.__class__.__name__,
self.name,
id(self)
)
def get_entity_storage(self, entity):
'''Return attribute storage on *entity* creating if missing.'''
storage_key = '_ftrack_attribute_storage'
storage = getattr(entity, storage_key, None)
if storage is None:
storage = collections.defaultdict(
lambda:
{
self._local_key: ftrack_api.symbol.NOT_SET,
self._remote_key: ftrack_api.symbol.NOT_SET
}
)
setattr(entity, storage_key, storage)
return storage
@property
def name(self):
'''Return name.'''
return self._name
@property
def mutable(self):
'''Return whether attribute is mutable.'''
return self._mutable
@property
def computed(self):
'''Return whether attribute is computed.'''
return self._computed
def get_value(self, entity):
'''Return current value for *entity*.
If a value was set locally then return it, otherwise return last known
remote value. If no remote value yet retrieved, make a request for it
via the session and block until available.
'''
value = self.get_local_value(entity)
if value is not ftrack_api.symbol.NOT_SET:
return value
value = self.get_remote_value(entity)
if value is not ftrack_api.symbol.NOT_SET:
return value
if not entity.session.auto_populate:
return value
self.populate_remote_value(entity)
return self.get_remote_value(entity)
def get_local_value(self, entity):
'''Return locally set value for *entity*.'''
storage = self.get_entity_storage(entity)
return storage[self.name][self._local_key]
def get_remote_value(self, entity):
'''Return remote value for *entity*.
.. note::
Only return locally stored remote value, do not fetch from remote.
'''
storage = self.get_entity_storage(entity)
return storage[self.name][self._remote_key]
def set_local_value(self, entity, value):
'''Set local *value* for *entity*.'''
if (
not self.mutable
and self.is_set(entity)
and value is not ftrack_api.symbol.NOT_SET
):
raise ftrack_api.exception.ImmutableAttributeError(self)
old_value = self.get_local_value(entity)
storage = self.get_entity_storage(entity)
storage[self.name][self._local_key] = value
# Record operation.
if entity.session.record_operations:
entity.session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity),
self.name,
old_value,
value
)
)
def set_remote_value(self, entity, value):
'''Set remote *value*.
.. note::
Only set locally stored remote value, do not persist to remote.
'''
storage = self.get_entity_storage(entity)
storage[self.name][self._remote_key] = value
def populate_remote_value(self, entity):
'''Populate remote value for *entity*.'''
entity.session.populate([entity], self.name)
def is_modified(self, entity):
'''Return whether local value set and differs from remote.
.. note::
Will not fetch remote value so may report True even when values
are the same on the remote.
'''
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
return (
local_value is not ftrack_api.symbol.NOT_SET
and local_value != remote_value
)
def is_set(self, entity):
'''Return whether a value is set for *entity*.'''
return any([
self.get_local_value(entity) is not ftrack_api.symbol.NOT_SET,
self.get_remote_value(entity) is not ftrack_api.symbol.NOT_SET
])
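# Illustrative sketch of the local/remote storage semantics above. As an
# assumption for demonstration only, a bare object stands in for an entity;
# this is enough here because only the storage helpers are exercised and no
# session-dependent code path is hit:
#
#     >>> class FakeEntity(object):
#     ...     pass
#     ...
#     >>> attribute = Attribute('name')
#     >>> entity = FakeEntity()
#     >>> attribute.get_local_value(entity) is ftrack_api.symbol.NOT_SET
#     True
#     >>> attribute.set_remote_value(entity, 'server value')
#     >>> attribute.get_remote_value(entity)
#     'server value'
#     >>> attribute.is_set(entity)
#     True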
class ScalarAttribute(Attribute):
'''Represent a scalar value.'''
def __init__(self, name, data_type, **kw):
'''Initialise attribute.'''
super(ScalarAttribute, self).__init__(name, **kw)
self.data_type = data_type
class ReferenceAttribute(Attribute):
'''Reference another entity.'''
def __init__(self, name, entity_type, **kw):
'''Initialise attribute.'''
super(ReferenceAttribute, self).__init__(name, **kw)
self.entity_type = entity_type
def populate_remote_value(self, entity):
'''Populate remote value for *entity*.
As attribute references another entity, use that entity's configured
default projections to auto populate useful attributes when loading.
'''
reference_entity_type = entity.session.types[self.entity_type]
default_projections = reference_entity_type.default_projections
projections = []
if default_projections:
for projection in default_projections:
projections.append('{0}.{1}'.format(self.name, projection))
else:
projections.append(self.name)
entity.session.populate([entity], ', '.join(projections))
def is_modified(self, entity):
'''Return whether a local value has been set and differs from remote.
.. note::
Will not fetch remote value so may report True even when values
are the same on the remote.
'''
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
if local_value is ftrack_api.symbol.NOT_SET:
return False
if remote_value is ftrack_api.symbol.NOT_SET:
return True
if (
ftrack_api.inspection.identity(local_value)
!= ftrack_api.inspection.identity(remote_value)
):
return True
return False
@merge_references
def get_value(self, entity):
return super(ReferenceAttribute, self).get_value(
entity
)
class AbstractCollectionAttribute(Attribute):
'''Base class for collection attributes.'''
#: Collection class used by attribute.
collection_class = None
@merge_references
def get_value(self, entity):
'''Return current value for *entity*.
If a value was set locally then return it, otherwise return last known
remote value. If no remote value yet retrieved, make a request for it
via the session and block until available.
.. note::
As value is a collection that is mutable, will transfer a remote
value into the local value on access if no local value currently
set.
'''
super(AbstractCollectionAttribute, self).get_value(entity)
# Conditionally copy remote value into local value so that it can be
# mutated without side effects.
local_value = self.get_local_value(entity)
remote_value = self.get_remote_value(entity)
if (
local_value is ftrack_api.symbol.NOT_SET
and isinstance(remote_value, self.collection_class)
):
try:
with entity.session.operation_recording(False):
self.set_local_value(entity, copy.copy(remote_value))
except ftrack_api.exception.ImmutableAttributeError:
pass
value = self.get_local_value(entity)
# If the local value is still not set then attempt to set it with a
# suitable placeholder collection so that the caller can interact with
# the collection using its normal interface. This is required for a
# newly created entity for example. It *could* be done as a simple
# default value, but that would incur cost for every collection even
# when they are not modified before commit.
if value is ftrack_api.symbol.NOT_SET:
try:
with entity.session.operation_recording(False):
self.set_local_value(
entity,
# None should be treated as empty collection.
None
)
except ftrack_api.exception.ImmutableAttributeError:
pass
return self.get_local_value(entity)
def set_local_value(self, entity, value):
'''Set local *value* for *entity*.'''
if value is not ftrack_api.symbol.NOT_SET:
value = self._adapt_to_collection(entity, value)
value.mutable = self.mutable
super(AbstractCollectionAttribute, self).set_local_value(entity, value)
def set_remote_value(self, entity, value):
'''Set remote *value*.
.. note::
Only set locally stored remote value, do not persist to remote.
'''
if value is not ftrack_api.symbol.NOT_SET:
value = self._adapt_to_collection(entity, value)
value.mutable = False
super(AbstractCollectionAttribute, self).set_remote_value(entity, value)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to appropriate collection instance for *entity*.
.. note::
If *value* is None then return a suitable empty collection.
'''
raise NotImplementedError()
class CollectionAttribute(AbstractCollectionAttribute):
'''Represent a collection of other entities.'''
#: Collection class used by attribute.
collection_class = ftrack_api.collection.Collection
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to a Collection instance on *entity*.'''
if not isinstance(value, ftrack_api.collection.Collection):
if value is None:
value = ftrack_api.collection.Collection(entity, self)
elif isinstance(value, list):
value = ftrack_api.collection.Collection(
entity, self, data=value
)
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api.exception.AttributeError(
'Collection already bound to a different attribute'
)
return value
class KeyValueMappedCollectionAttribute(AbstractCollectionAttribute):
'''Represent a mapped key, value collection of entities.'''
#: Collection class used by attribute.
collection_class = ftrack_api.collection.KeyValueMappedCollectionProxy
def __init__(
self, name, creator, key_attribute, value_attribute, **kw
):
'''Initialise attribute with *name*.
*creator* should be a function that accepts a dictionary of data and
is used by the referenced collection to create new entities in the
collection.
*key_attribute* should be the name of the attribute on an entity in
the collection that represents the value for 'key' of the dictionary.
*value_attribute* should be the name of the attribute on an entity in
the collection that represents the value for 'value' of the dictionary.
'''
self.creator = creator
self.key_attribute = key_attribute
self.value_attribute = value_attribute
super(KeyValueMappedCollectionAttribute, self).__init__(name, **kw)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to appropriate collection instance for *entity*.'''
if not isinstance(
value, ftrack_api.collection.KeyValueMappedCollectionProxy
):
if value is None:
value = ftrack_api.collection.KeyValueMappedCollectionProxy(
ftrack_api.collection.Collection(entity, self),
self.creator, self.key_attribute,
self.value_attribute
)
elif isinstance(value, (list, ftrack_api.collection.Collection)):
if isinstance(value, list):
value = ftrack_api.collection.Collection(
entity, self, data=value
)
value = ftrack_api.collection.KeyValueMappedCollectionProxy(
value, self.creator, self.key_attribute,
self.value_attribute
)
elif isinstance(value, collections.Mapping):
# Convert mapping.
# TODO: When backend model improves, revisit this logic.
# First get existing value and delete all references. This is
# needed because otherwise they will not be automatically
# removed server side.
# The following should not cause recursion as the internal
# values should be mapped collections already.
current_value = self.get_value(entity)
if not isinstance(
current_value,
ftrack_api.collection.KeyValueMappedCollectionProxy
):
raise NotImplementedError(
'Cannot adapt mapping to collection as current value '
'type is not a KeyValueMappedCollectionProxy.'
)
# Create the new collection using the existing collection as
# basis. Then update through proxy interface to ensure all
# internal operations called consistently (such as entity
# deletion for key removal).
collection = ftrack_api.collection.Collection(
entity, self, data=current_value.collection[:]
)
collection_proxy = (
ftrack_api.collection.KeyValueMappedCollectionProxy(
collection, self.creator,
self.key_attribute, self.value_attribute
)
)
# Remove expired keys from collection.
expired_keys = set(current_value.keys()) - set(value.keys())
for key in expired_keys:
del collection_proxy[key]
# Set new values for existing keys / add new keys.
for key, value in list(value.items()):
collection_proxy[key] = value
value = collection_proxy
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api.exception.AttributeError(
'Collection already bound to a different attribute.'
)
return value
class CustomAttributeCollectionAttribute(AbstractCollectionAttribute):
'''Represent a mapped custom attribute collection of entities.'''
#: Collection class used by attribute.
collection_class = (
ftrack_api.collection.CustomAttributeCollectionProxy
)
def _adapt_to_collection(self, entity, value):
'''Adapt *value* to appropriate collection instance for *entity*.'''
if not isinstance(
value, ftrack_api.collection.CustomAttributeCollectionProxy
):
if value is None:
value = ftrack_api.collection.CustomAttributeCollectionProxy(
ftrack_api.collection.Collection(entity, self)
)
elif isinstance(value, (list, ftrack_api.collection.Collection)):
# Why are we creating a new collection if it is a list? This will
# cause any merge to create a new proxy and collection.
if isinstance(value, list):
value = ftrack_api.collection.Collection(
entity, self, data=value
)
value = ftrack_api.collection.CustomAttributeCollectionProxy(
value
)
elif isinstance(value, collections.Mapping):
# Convert mapping.
# TODO: When backend model improves, revisit this logic.
# First get existing value and delete all references. This is
# needed because otherwise they will not be automatically
# removed server side.
# The following should not cause recursion as the internal
# values should be mapped collections already.
current_value = self.get_value(entity)
if not isinstance(
current_value,
ftrack_api.collection.CustomAttributeCollectionProxy
):
raise NotImplementedError(
'Cannot adapt mapping to collection as current value '
'type is not a CustomAttributeCollectionProxy.'
)
# Create the new collection using the existing collection as
# basis. Then update through proxy interface to ensure all
# internal operations called consistently (such as entity
# deletion for key removal).
collection = ftrack_api.collection.Collection(
entity, self, data=current_value.collection[:]
)
collection_proxy = (
ftrack_api.collection.CustomAttributeCollectionProxy(
collection
)
)
# Remove expired keys from collection.
expired_keys = set(current_value.keys()) - set(value.keys())
for key in expired_keys:
del collection_proxy[key]
# Set new values for existing keys / add new keys.
for key, value in list(value.items()):
collection_proxy[key] = value
value = collection_proxy
else:
raise NotImplementedError(
'Cannot convert {0!r} to collection.'.format(value)
)
else:
if value.attribute is not self:
raise ftrack_api.exception.AttributeError(
'Collection already bound to a different attribute.'
)
return value

View file

@ -0,0 +1,608 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
'''Caching framework.
Defines a standardised :class:`Cache` interface for storing data against
specific keys. Key generation is also standardised using a :class:`KeyMaker`
interface.
Combining a Cache and KeyMaker allows for memoisation of function calls with
respect to the arguments used via a :class:`Memoiser`.
As a convenience, a simple :func:`memoise` decorator is included for quick
memoisation of functions using a global cache and standard key maker.
'''
from future import standard_library
standard_library.install_aliases()
from builtins import str
from six import string_types
from builtins import object
import collections
import functools
import abc
import copy
import inspect
import re
import six
try:
# Python 2.x
import anydbm
except ImportError:
import dbm as anydbm
import contextlib
from future.utils import with_metaclass
try:
    try:
        import _pickle as pickle
    except ImportError:
        from six.moves import cPickle as pickle
except ImportError:
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
import ftrack_api.inspection
import ftrack_api.symbol
class Cache(with_metaclass(abc.ABCMeta, object)):
'''Cache interface.
Derive from this to define concrete cache implementations. A cache is
centered around the concept of key:value pairings where the key is unique
across the cache.
'''
@abc.abstractmethod
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
@abc.abstractmethod
def set(self, key, value):
'''Set *value* for *key*.'''
@abc.abstractmethod
def remove(self, key):
'''Remove *key* and return stored value.
Raise :exc:`KeyError` if *key* not found.
'''
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
raise NotImplementedError() # pragma: no cover
def values(self):
'''Return values for current keys.'''
values = []
for key in list(self.keys()):
try:
value = self.get(key)
except KeyError:
continue
else:
values.append(value)
return values
def clear(self, pattern=None):
'''Remove all keys matching *pattern*.
*pattern* should be a regular expression string.
If *pattern* is None then all keys will be removed.
'''
if pattern is not None:
pattern = re.compile(pattern)
for key in list(self.keys()):
if pattern is not None:
if not pattern.search(key):
continue
try:
self.remove(key)
except KeyError:
pass
class ProxyCache(Cache):
'''Proxy another cache.'''
def __init__(self, proxied):
'''Initialise cache with *proxied* cache instance.'''
self.proxied = proxied
super(ProxyCache, self).__init__()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
return self.proxied.get(key)
def set(self, key, value):
'''Set *value* for *key*.'''
return self.proxied.set(key, value)
def remove(self, key):
'''Remove *key* and return stored value.
Raise :exc:`KeyError` if *key* not found.
'''
return self.proxied.remove(key)
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
return list(self.proxied.keys())
class LayeredCache(Cache):
'''Layered cache.'''
def __init__(self, caches):
'''Initialise cache with *caches*.'''
super(LayeredCache, self).__init__()
self.caches = caches
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
Attempt to retrieve from cache layers in turn, starting with shallowest.
If value retrieved, then also set the value in each higher level cache
up from where retrieved.
'''
target_caches = []
value = ftrack_api.symbol.NOT_SET
for cache in self.caches:
try:
value = cache.get(key)
except KeyError:
target_caches.append(cache)
continue
else:
break
if value is ftrack_api.symbol.NOT_SET:
raise KeyError(key)
# Set value on all higher level caches.
for cache in target_caches:
cache.set(key, value)
return value
def set(self, key, value):
'''Set *value* for *key*.'''
for cache in self.caches:
cache.set(key, value)
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found in any layer.
'''
removed = False
for cache in self.caches:
try:
cache.remove(key)
except KeyError:
pass
else:
removed = True
if not removed:
raise KeyError(key)
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
keys = []
for cache in self.caches:
keys.extend(list(cache.keys()))
return list(set(keys))
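# Illustrative sketch of the read-through behaviour described above, using
# the MemoryCache defined below as the concrete layer type:
#
#     >>> shallow, deep = MemoryCache(), MemoryCache()
#     >>> layered = LayeredCache([shallow, deep])
#     >>> deep.set('key', 'value')
#     >>> layered.get('key')
#     'value'
#     >>> shallow.get('key')  # Value was back-filled into shallower layer.
#     'value'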
class MemoryCache(Cache):
'''Memory based cache.'''
def __init__(self):
'''Initialise cache.'''
self._cache = {}
super(MemoryCache, self).__init__()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
return self._cache[key]
def set(self, key, value):
'''Set *value* for *key*.'''
self._cache[key] = value
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found.
'''
del self._cache[key]
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
return list(self._cache.keys())
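# Illustrative sketch of the basic Cache contract, shown with MemoryCache:
#
#     >>> cache = MemoryCache()
#     >>> cache.set('key', 'value')
#     >>> cache.get('key')
#     'value'
#     >>> cache.clear(pattern='^key$')
#     >>> cache.get('key')
#     Traceback (most recent call last):
#     ...
#     KeyError: 'key'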
class FileCache(Cache):
'''File based cache that uses :mod:`anydbm` module.
.. note::
No locking of the underlying file is performed.
'''
def __init__(self, path):
'''Initialise cache at *path*.'''
self.path = path
# Initialise cache.
cache = anydbm.open(self.path, 'c')
cache.close()
super(FileCache, self).__init__()
@contextlib.contextmanager
def _database(self):
'''Yield opened database file.'''
cache = anydbm.open(self.path, 'w')
try:
yield cache
finally:
cache.close()
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
with self._database() as cache:
return cache[key.encode('ascii')].decode('utf-8')
def set(self, key, value):
'''Set *value* for *key*.'''
with self._database() as cache:
cache[key.encode('ascii')] = value
def remove(self, key):
'''Remove *key*.
Raise :exc:`KeyError` if *key* not found.
'''
with self._database() as cache:
del cache[key.encode('ascii')]
def keys(self):
'''Return list of keys at this current time.
.. warning::
Actual keys may differ from those returned due to timing of access.
'''
with self._database() as cache:
return [s.decode('utf-8') for s in cache.keys()]
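# Illustrative sketch (assumes a writable temporary directory; raw values
# must be strings or bytes, so structured data is typically stored via the
# SerialisedCache defined below):
#
#     >>> import os
#     >>> import tempfile
#     >>> path = os.path.join(tempfile.mkdtemp(), 'cache')
#     >>> file_cache = FileCache(path)
#     >>> file_cache.set('key', 'value')
#     >>> file_cache.get('key')
#     'value'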
class SerialisedCache(ProxyCache):
'''Proxied cache that stores values as serialised data.'''
def __init__(self, proxied, encode=None, decode=None):
'''Initialise cache with *encode* and *decode* callables.
*proxied* is the underlying cache to use for storage.
'''
self.encode = encode
self.decode = decode
super(SerialisedCache, self).__init__(proxied)
def get(self, key):
'''Return value for *key*.
Raise :exc:`KeyError` if *key* not found.
'''
value = super(SerialisedCache, self).get(key)
if self.decode:
value = self.decode(value)
return value
def set(self, key, value):
'''Set *value* for *key*.'''
if self.encode:
value = self.encode(value)
super(SerialisedCache, self).set(key, value)
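# Illustrative sketch: pairing SerialisedCache with a backing store lets
# structured values round-trip through a string-only cache such as
# FileCache. JSON is used here purely as an example codec:
#
#     >>> import json
#     >>> cache = SerialisedCache(
#     ...     MemoryCache(), encode=json.dumps, decode=json.loads
#     ... )
#     >>> cache.set('key', {'a': 1})
#     >>> cache.get('key')
#     {'a': 1}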
class KeyMaker(with_metaclass(abc.ABCMeta, object)):
'''Generate unique keys.'''
def __init__(self):
'''Initialise key maker.'''
super(KeyMaker, self).__init__()
self.item_separator = ''
def key(self, *items):
'''Return key for *items*.'''
keys = []
for item in items:
keys.append(self._key(item))
return self.item_separator.join(keys)
@abc.abstractmethod
def _key(self, obj):
'''Return key for *obj*.'''
class StringKeyMaker(KeyMaker):
'''Generate string key.'''
def _key(self, obj):
'''Return key for *obj*.'''
return str(obj)
class ObjectKeyMaker(KeyMaker):
'''Generate unique keys for objects.'''
def __init__(self):
'''Initialise key maker.'''
super(ObjectKeyMaker, self).__init__()
self.item_separator = b'\0'
self.mapping_identifier = b'\1'
self.mapping_pair_separator = b'\2'
self.iterable_identifier = b'\3'
self.name_identifier = b'\4'
def _key(self, item):
return self.__key(item)
def __key(self, item):
'''Return key for *item*.
Returned key will be a pickle-like string representing the *item*. This
allows for typically non-hashable objects to be used in key generation
(such as dictionaries).
If *item* is iterable then each item in it shall also be passed to this
method to ensure correct key generation.
Special markers are used to distinguish handling of specific cases in
order to ensure uniqueness of key corresponds directly to *item*.
Example::
>>> key_maker = ObjectKeyMaker()
>>> def add(x, y):
... "Return sum of *x* and *y*."
... return x + y
...
>>> key_maker.key(add, (1, 2))
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03'
>>> key_maker.key(add, (1, 3))
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x03.\x03'
'''
# Ensure py3 uses a pickle protocol available in py2 so py2 can decode it.
pickle_protocol = 2
# TODO: Consider using a more robust and comprehensive solution such as
# dill (https://github.com/uqfoundation/dill).
if isinstance(item, collections.Iterable):
if isinstance(item, string_types):
return pickle.dumps(item, pickle_protocol)
if isinstance(item, collections.Mapping):
contents = self.item_separator.join([
(
self._key(key) +
self.mapping_pair_separator +
self._key(value)
)
for key, value in sorted(item.items())
])
return (
self.mapping_identifier +
contents +
self.mapping_identifier
)
else:
contents = self.item_separator.join([
self._key(item) for item in item
])
return (
self.iterable_identifier +
contents +
self.iterable_identifier
)
elif inspect.ismethod(item):
return b''.join((
self.name_identifier,
item.__name__.encode(),
self.item_separator,
item.__self__.__class__.__name__.encode(),
self.item_separator,
item.__module__.encode()
))
elif inspect.isfunction(item) or inspect.isclass(item):
return b''.join((
self.name_identifier,
item.__name__.encode(),
self.item_separator,
item.__module__.encode()
))
elif inspect.isbuiltin(item):
return self.name_identifier + item.__name__.encode()
else:
return pickle.dumps(item, pickle_protocol)
class Memoiser(object):
'''Memoise function calls using a :class:`KeyMaker` and :class:`Cache`.
Example::
>>> memoiser = Memoiser(MemoryCache(), ObjectKeyMaker())
>>> def add(x, y):
... "Return sum of *x* and *y*."
...     print('Called')
... return x + y
...
>>> memoiser.call(add, (1, 2), {})
Called
>>> memoiser.call(add, (1, 2), {})
>>> memoiser.call(add, (1, 3), {})
Called
'''
def __init__(self, cache=None, key_maker=None, return_copies=True):
'''Initialise with *cache* and *key_maker* to use.
If *cache* is not specified a default :class:`MemoryCache` will be
used. Similarly, if *key_maker* is not specified a default
:class:`ObjectKeyMaker` will be used.
If *return_copies* is True then all results returned from the cache will
be deep copies to avoid indirect mutation of cached values.
'''
self.cache = cache
if self.cache is None:
self.cache = MemoryCache()
self.key_maker = key_maker
if self.key_maker is None:
self.key_maker = ObjectKeyMaker()
self.return_copies = return_copies
super(Memoiser, self).__init__()
def call(self, function, args=None, kw=None):
'''Call *function* with *args* and *kw* and return result.
If *function* was previously called with exactly the same arguments
then return cached result if available.
Store result for call in cache.
'''
if args is None:
args = ()
if kw is None:
kw = {}
# Support arguments being passed as positionals or keywords.
arguments = inspect.getcallargs(function, *args, **kw)
key = self.key_maker.key(function, arguments)
try:
value = self.cache.get(key)
except KeyError:
value = function(*args, **kw)
self.cache.set(key, value)
# If requested, deep copy value to return in order to avoid cached value
# being inadvertently altered by the caller.
if self.return_copies:
value = copy.deepcopy(value)
return value
def memoise_decorator(memoiser):
'''Decorator to memoise function calls using *memoiser*.'''
def outer(function):
@functools.wraps(function)
def inner(*args, **kw):
return memoiser.call(function, args, kw)
return inner
return outer
#: Default memoiser.
memoiser = Memoiser()
#: Default memoise decorator using standard cache and key maker.
memoise = memoise_decorator(memoiser)
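# Illustrative sketch of the memoise decorator defined above:
#
#     >>> @memoise
#     ... def add(x, y):
#     ...     print('Called')
#     ...     return x + y
#     ...
#     >>> add(1, 2)
#     Called
#     3
#     >>> add(1, 2)
#     3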

View file

@ -0,0 +1,515 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
import logging
import collections
import copy
import ftrack_api.exception
import ftrack_api.inspection
import ftrack_api.symbol
import ftrack_api.operation
import ftrack_api.cache
from ftrack_api.logging import LazyLogMessage as L
class Collection(collections.MutableSequence):
'''A collection of entities.'''
def __init__(self, entity, attribute, mutable=True, data=None):
'''Initialise collection.'''
self.entity = entity
self.attribute = attribute
self._data = []
self._identities = set()
# Set initial dataset.
# Note: For initialisation, immutability is deferred till after initial
# population as otherwise there would be no public way to initialise an
# immutable collection. The reason self._data is not just set directly
# is to ensure other logic can be applied without special handling.
self.mutable = True
try:
if data is None:
data = []
with self.entity.session.operation_recording(False):
self.extend(data)
finally:
self.mutable = mutable
def _identity_key(self, entity):
'''Return identity key for *entity*.'''
return str(ftrack_api.inspection.identity(entity))
def __copy__(self):
'''Return shallow copy.
.. note::
To maintain expectations on usage, the shallow copy will include a
shallow copy of the underlying data store.
'''
cls = self.__class__
copied_instance = cls.__new__(cls)
copied_instance.__dict__.update(self.__dict__)
copied_instance._data = copy.copy(self._data)
copied_instance._identities = copy.copy(self._identities)
return copied_instance
def _notify(self, old_value):
'''Notify about modification.'''
# Record operation.
if self.entity.session.record_operations:
self.entity.session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
self.entity.entity_type,
ftrack_api.inspection.primary_key(self.entity),
self.attribute.name,
old_value,
self
)
)
def insert(self, index, item):
'''Insert *item* at *index*.'''
if not self.mutable:
raise ftrack_api.exception.ImmutableCollectionError(self)
if item in self:
raise ftrack_api.exception.DuplicateItemInCollectionError(
item, self
)
old_value = copy.copy(self)
self._data.insert(index, item)
self._identities.add(self._identity_key(item))
self._notify(old_value)
def __contains__(self, value):
'''Return whether *value* present in collection.'''
return self._identity_key(value) in self._identities
def __getitem__(self, index):
'''Return item at *index*.'''
return self._data[index]
def __setitem__(self, index, item):
'''Set *item* against *index*.'''
if not self.mutable:
raise ftrack_api.exception.ImmutableCollectionError(self)
try:
existing_index = self.index(item)
except ValueError:
pass
else:
if index != existing_index:
raise ftrack_api.exception.DuplicateItemInCollectionError(
item, self
)
old_value = copy.copy(self)
try:
existing_item = self._data[index]
except IndexError:
pass
else:
self._identities.remove(self._identity_key(existing_item))
self._data[index] = item
self._identities.add(self._identity_key(item))
self._notify(old_value)
def __delitem__(self, index):
'''Remove item at *index*.'''
if not self.mutable:
raise ftrack_api.exception.ImmutableCollectionError(self)
old_value = copy.copy(self)
item = self._data[index]
del self._data[index]
self._identities.remove(self._identity_key(item))
self._notify(old_value)
def __len__(self):
'''Return count of items.'''
return len(self._data)
def __eq__(self, other):
'''Return whether this collection is equal to *other*.'''
if not isinstance(other, Collection):
return False
return sorted(self._identities) == sorted(other._identities)
def __ne__(self, other):
'''Return whether this collection is not equal to *other*.'''
return not self == other
class MappedCollectionProxy(collections.MutableMapping):
'''Common base class for mapped collection of entities.'''
def __init__(self, collection):
'''Initialise proxy for *collection*.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self.collection = collection
super(MappedCollectionProxy, self).__init__()
def __copy__(self):
'''Return shallow copy.
.. note::
To maintain expectations on usage, the shallow copy will include a
shallow copy of the underlying collection.
'''
cls = self.__class__
copied_instance = cls.__new__(cls)
copied_instance.__dict__.update(self.__dict__)
copied_instance.collection = copy.copy(self.collection)
return copied_instance
@property
def mutable(self):
'''Return whether collection is mutable.'''
return self.collection.mutable
@mutable.setter
def mutable(self, value):
'''Set whether collection is mutable to *value*.'''
self.collection.mutable = value
@property
def attribute(self):
'''Return attribute bound to.'''
return self.collection.attribute
@attribute.setter
def attribute(self, value):
'''Set bound attribute to *value*.'''
self.collection.attribute = value
class KeyValueMappedCollectionProxy(MappedCollectionProxy):
'''A mapped collection of key, value entities.
Proxy a standard :class:`Collection` as a mapping where certain attributes
from the entities in the collection are mapped to key, value pairs.
For example::
>>> collection = [Metadata(key='foo', value='bar'), ...]
>>> mapped = KeyValueMappedCollectionProxy(
... collection, create_metadata,
... key_attribute='key', value_attribute='value'
... )
>>> print(mapped['foo'])
bar
>>> mapped['bam'] = 'biz'
>>> print(mapped.collection[-1])
Metadata(key='bam', value='biz')
'''
def __init__(
self, collection, creator, key_attribute, value_attribute
):
'''Initialise collection.'''
self.creator = creator
self.key_attribute = key_attribute
self.value_attribute = value_attribute
super(KeyValueMappedCollectionProxy, self).__init__(collection)
def _get_entity_by_key(self, key):
'''Return entity instance with matching *key* from collection.'''
for entity in self.collection:
if entity[self.key_attribute] == key:
return entity
raise KeyError(key)
def __getitem__(self, key):
'''Return value for *key*.'''
entity = self._get_entity_by_key(key)
return entity[self.value_attribute]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
try:
entity = self._get_entity_by_key(key)
except KeyError:
data = {
self.key_attribute: key,
self.value_attribute: value
}
entity = self.creator(self, data)
if (
ftrack_api.inspection.state(entity) is
ftrack_api.symbol.CREATED
):
# Persisting this entity will be handled here, record the
# operation.
self.collection.append(entity)
else:
# The entity is created and persisted separately by the
# creator. Do not record this operation.
with self.collection.entity.session.operation_recording(False):
# Do not record this operation since it will trigger
# redundant and potentially failing operations.
self.collection.append(entity)
else:
entity[self.value_attribute] = value
def __delitem__(self, key):
'''Remove and delete *key*.
.. note::
The associated entity will be deleted as well.
'''
for index, entity in enumerate(self.collection):
if entity[self.key_attribute] == key:
break
else:
raise KeyError(key)
del self.collection[index]
entity.session.delete(entity)
def __iter__(self):
'''Iterate over all keys.'''
keys = set()
for entity in self.collection:
keys.add(entity[self.key_attribute])
return iter(keys)
def __len__(self):
'''Return count of keys.'''
keys = set()
for entity in self.collection:
keys.add(entity[self.key_attribute])
return len(keys)
def keys(self):
# COMPAT for unit tests.
return list(super(
KeyValueMappedCollectionProxy, self
).keys())
class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker):
'''Generate key for session.'''
def _key(self, obj):
'''Return key for *obj*.'''
if isinstance(obj, dict):
session = obj.get('session')
if session is not None:
# Key by session only.
return str(id(session))
return str(obj)
#: Memoiser for use with callables that should be called once per session.
memoise_session = ftrack_api.cache.memoise_decorator(
ftrack_api.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
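# Illustrative sketch: a callable accepting a ``session`` argument is
# evaluated once per session instance, with the result shared (not copied)
# on subsequent calls. A bare object stands in for a session here, since
# keying only uses its id():
#
#     >>> @memoise_session
#     ... def expensive_lookup(session):
#     ...     print('Queried')
#     ...     return ['result']
#     ...
#     >>> session = object()
#     >>> expensive_lookup(session)
#     Queried
#     ['result']
#     >>> expensive_lookup(session)
#     ['result']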
@memoise_session
def _get_custom_attribute_configurations(session):
'''Return list of custom attribute configurations.
The configuration objects will have key, project_id, id and object_type_id
populated.
'''
return session.query(
'select key, project_id, id, object_type_id, entity_type from '
'CustomAttributeConfiguration'
).all()
class CustomAttributeCollectionProxy(MappedCollectionProxy):
'''A mapped collection of custom attribute value entities.'''
def __init__(
self, collection
):
'''Initialise collection.'''
self.key_attribute = 'configuration_id'
self.value_attribute = 'value'
super(CustomAttributeCollectionProxy, self).__init__(collection)
def _get_entity_configurations(self):
'''Return all configurations for current collection entity.'''
entity = self.collection.entity
entity_type = None
project_id = None
object_type_id = None
if 'object_type_id' in list(entity.keys()):
project_id = entity['project_id']
entity_type = 'task'
object_type_id = entity['object_type_id']
if entity.entity_type == 'AssetVersion':
project_id = entity['asset']['parent']['project_id']
entity_type = 'assetversion'
if entity.entity_type == 'Asset':
project_id = entity['parent']['project_id']
entity_type = 'asset'
if entity.entity_type == 'Project':
project_id = entity['id']
entity_type = 'show'
if entity.entity_type == 'User':
entity_type = 'user'
if entity_type is None:
raise ValueError(
'Entity {!r} not supported.'.format(entity)
)
configurations = []
for configuration in _get_custom_attribute_configurations(
entity.session
):
if (
configuration['entity_type'] == entity_type and
configuration['project_id'] in (project_id, None) and
configuration['object_type_id'] == object_type_id
):
configurations.append(configuration)
# Return with global configurations at the end of the list. This is done
# so that global configurations are shadowed by project specific ones when
# the configurations list is looped over looking for a matching `key`.
return sorted(
configurations, key=lambda item: item['project_id'] is None
)
def _get_keys(self):
'''Return a list of all keys.'''
keys = []
for configuration in self._get_entity_configurations():
keys.append(configuration['key'])
return keys
def _get_entity_by_key(self, key):
'''Return entity instance with matching *key* from collection.'''
configuration_id = self.get_configuration_id_from_key(key)
for entity in self.collection:
if entity[self.key_attribute] == configuration_id:
return entity
return None
def get_configuration_id_from_key(self, key):
'''Return id of configuration with matching *key*.
Raise :exc:`KeyError` if no configuration with matching *key* found.
'''
for configuration in self._get_entity_configurations():
if key == configuration['key']:
return configuration['id']
raise KeyError(key)
def __getitem__(self, key):
'''Return value for *key*.'''
entity = self._get_entity_by_key(key)
if entity:
return entity[self.value_attribute]
for configuration in self._get_entity_configurations():
if configuration['key'] == key:
return configuration['default']
raise KeyError(key)
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
custom_attribute_value = self._get_entity_by_key(key)
if custom_attribute_value:
custom_attribute_value[self.value_attribute] = value
else:
entity = self.collection.entity
session = entity.session
data = {
self.key_attribute: self.get_configuration_id_from_key(key),
self.value_attribute: value,
'entity_id': entity['id']
}
# Make sure to use the currently active collection. This is
# necessary since a merge might have replaced the current one.
self.collection.entity['custom_attributes'].collection.append(
session.create('CustomAttributeValue', data)
)
def __delitem__(self, key):
'''Remove and delete *key*.
.. note::
The associated entity will be deleted as well.
'''
custom_attribute_value = self._get_entity_by_key(key)
if custom_attribute_value:
index = self.collection.index(custom_attribute_value)
del self.collection[index]
custom_attribute_value.session.delete(custom_attribute_value)
else:
self.logger.warning(L(
'Cannot delete {0!r} on {1!r}, no custom attribute value set.',
key, self.collection.entity
))
def __eq__(self, collection):
'''Return True if *collection* equals proxy collection.'''
if collection is ftrack_api.symbol.NOT_SET:
return False
return collection.collection == self.collection
def __iter__(self):
'''Iterate over all keys.'''
keys = self._get_keys()
return iter(keys)
def __len__(self):
'''Return count of keys.'''
keys = self._get_keys()
return len(keys)
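# Illustrative sketch of typical access through an entity's
# ``custom_attributes`` mapping (assumes a connected session and that a
# custom attribute with key 'fstart' is configured for the entity type):
#
#     >>> task = session.query('Task').first()
#     >>> task['custom_attributes']['fstart'] = 1001.0
#     >>> session.commit()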

View file

@ -0,0 +1,145 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack
from builtins import object
import os
from abc import ABCMeta, abstractmethod
import tempfile
from future.utils import with_metaclass
class Data(with_metaclass(ABCMeta, object)):
'''File-like object for manipulating data.'''
def __init__(self):
'''Initialise data access.'''
self.closed = False
@abstractmethod
def read(self, limit=None):
'''Return content from current position up to *limit*.'''
@abstractmethod
def write(self, content):
'''Write content at current position.'''
def flush(self):
'''Flush buffers ensuring data written.'''
def seek(self, offset, whence=os.SEEK_SET):
'''Move internal pointer by *offset*.
The *whence* argument is optional and defaults to os.SEEK_SET or 0
(absolute file positioning); other values are os.SEEK_CUR or 1
(seek relative to the current position) and os.SEEK_END or 2
(seek relative to the file's end).
'''
raise NotImplementedError('Seek not supported.')
def tell(self):
'''Return current position of internal pointer.'''
raise NotImplementedError('Tell not supported.')
def close(self):
'''Flush buffers and prevent further access.'''
self.flush()
self.closed = True
class FileWrapper(Data):
'''Data wrapper for Python file objects.'''
def __init__(self, wrapped_file):
'''Initialise access to *wrapped_file*.'''
self.wrapped_file = wrapped_file
self._read_since_last_write = False
super(FileWrapper, self).__init__()
def read(self, limit=None):
'''Return content from current position up to *limit*.'''
self._read_since_last_write = True
if limit is None:
limit = -1
return self.wrapped_file.read(limit)
def write(self, content):
'''Write content at current position.'''
if self._read_since_last_write:
# Windows requires a seek before switching from read to write.
self.seek(self.tell())
self.wrapped_file.write(content)
self._read_since_last_write = False
def flush(self):
'''Flush buffers ensuring data written.'''
super(FileWrapper, self).flush()
if hasattr(self.wrapped_file, 'flush'):
self.wrapped_file.flush()
def seek(self, offset, whence=os.SEEK_SET):
'''Move internal pointer by *offset*.'''
self.wrapped_file.seek(offset, whence)
def tell(self):
'''Return current position of internal pointer.'''
return self.wrapped_file.tell()
def close(self):
'''Flush buffers and prevent further access.'''
if not self.closed:
super(FileWrapper, self).close()
if hasattr(self.wrapped_file, 'close'):
self.wrapped_file.close()
class File(FileWrapper):
'''Data wrapper accepting filepath.'''
def __init__(self, path, mode='rb'):
'''Open file at *path* with *mode*.'''
file_object = open(path, mode)
super(File, self).__init__(file_object)
class String(FileWrapper):
'''Data wrapper using TemporaryFile instance.'''
def __init__(self, content=None):
'''Initialise data with *content*.'''
# Track if data is binary or not. If it is binary then read should also
# return binary.
self.is_binary = True
super(String, self).__init__(
tempfile.TemporaryFile()
)
if content is not None:
if not isinstance(content, bytes):
self.is_binary = False
content = content.encode()
self.wrapped_file.write(content)
self.wrapped_file.seek(0)
def write(self, content):
if not isinstance(content, bytes):
self.is_binary = False
content = content.encode()
super(String, self).write(
content
)
def read(self, limit=None):
content = super(String, self).read(limit)
if not self.is_binary:
content = content.decode('utf-8')
return content
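# Illustrative sketch of the Data wrappers above:
#
#     >>> data = String('hello')
#     >>> data.read()
#     'hello'
#     >>> data.seek(0)
#     >>> data.read(2)
#     'he'
#     >>> data.close()
#     >>> data.closed
#     True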

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,91 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api.entity.base
class AssetVersion(ftrack_api.entity.base.Entity):
'''Represent asset version.'''
def create_component(
self, path, data=None, location=None
):
'''Create a new component from *path* with additional *data*.
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`). This version is
automatically set as the component's version.
If *location* is specified then automatically add component to that
location.
'''
if data is None:
data = {}
data.pop('version_id', None)
data['version'] = self
return self.session.create_component(path, data=data, location=location)
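    # Illustrative sketch (assumes a connected session, an existing
    # ``version`` entity and media on disk at the placeholder path):
    #
    #     >>> component = version.create_component(
    #     ...     '/path/to/render.%04d.exr [1-10]', data={'name': 'main'}
    #     ... )
    #     >>> session.commit()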
def encode_media(self, media, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type, and the job's data
will contain information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible, and will be
set as the version's thumbnail.
The new components will automatically be associated with the version.
A server version of 3.3.32 or higher is required for this to function
properly.
If *media* is a file path, a new source component will be created and
added to the ftrack.server location, and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it is assumed to already be
available in the ftrack.server location.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behaviour.
'''
return self.session.encode_media(
media, version_id=self['id'], keep_original=keep_original
)
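# Illustrative sketch (assumes a connected session and an existing
# ``version`` entity; the media path is a placeholder):
#
#     >>> job = version.encode_media('/path/to/media.mov')
#     >>> job['data']  # JSON string in the format described above.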

View file

@ -0,0 +1,407 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
from builtins import str
import abc
import collections
import logging
import ftrack_api.symbol
import ftrack_api.attribute
import ftrack_api.inspection
import ftrack_api.exception
import ftrack_api.operation
from ftrack_api.logging import LazyLogMessage as L
from future.utils import with_metaclass
class _EntityBase(object):
'''Common base class required to allow for mixins.'''
pass
class DynamicEntityTypeMetaclass(abc.ABCMeta):
'''Custom metaclass to customise representation of dynamic classes.
.. note::
Derive from same metaclass as derived bases to avoid conflicts.
'''
def __repr__(self):
'''Return representation of class.'''
return '<dynamic ftrack class \'{0}\'>'.format(self.__name__)
class Entity(with_metaclass(DynamicEntityTypeMetaclass, _EntityBase, collections.MutableMapping)):
'''Base class for all entities.'''
entity_type = 'Entity'
attributes = None
primary_key_attributes = None
default_projections = None
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
super(Entity, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self.session = session
self._inflated = set()
if data is None:
data = {}
self.logger.debug(L(
'{0} entity from {1!r}.',
('Reconstructing' if reconstructing else 'Constructing'), data
))
self._ignore_data_keys = ['__entity_type__']
if not reconstructing:
self._construct(data)
else:
self._reconstruct(data)
def _construct(self, data):
'''Construct from *data*.'''
# Suspend operation recording so that all modifications can be applied
# in single create operation. In addition, recording a modification
# operation requires a primary key which may not be available yet.
relational_attributes = dict()
with self.session.operation_recording(False):
# Set defaults for any unset local attributes.
for attribute in self.__class__.attributes:
if attribute.name not in data:
default_value = attribute.default_value
if callable(default_value):
default_value = default_value(self)
attribute.set_local_value(self, default_value)
# Data represents locally set values.
for key, value in list(data.items()):
if key in self._ignore_data_keys:
continue
attribute = self.__class__.attributes.get(key)
if attribute is None:
self.logger.debug(L(
'Cannot populate {0!r} attribute as no such '
'attribute found on entity {1!r}.', key, self
))
continue
if not isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
relational_attributes.setdefault(
attribute, value
)
else:
attribute.set_local_value(self, value)
# Record create operation.
# Note: As this operation is recorded *before* any Session.merge takes
# place there is the possibility that the operation will hold references
# to outdated data in entity_data. However, this would be unusual in
# that it would mean the same new entity was created twice and only one
# altered. Conversely, if this operation were recorded *after*
# Session.merge took place, any cache would not be able to determine
# the status of the entity, which could be important if the cache should
# not store newly created entities that have not yet been persisted. Out
# of these two 'evils' this approach is deemed the lesser at this time.
# A third, more involved, approach to satisfy both might be to record
# the operation with a PENDING entity_data value and then update with
# merged values post merge.
if self.session.record_operations:
entity_data = {}
# Lower level API used here to avoid including any empty
# collections that are automatically generated on access.
for attribute in self.attributes:
value = attribute.get_local_value(self)
if value is not ftrack_api.symbol.NOT_SET:
entity_data[attribute.name] = value
self.session.recorded_operations.push(
ftrack_api.operation.CreateEntityOperation(
self.entity_type,
ftrack_api.inspection.primary_key(self),
entity_data
)
)
for attribute, value in list(relational_attributes.items()):
# Finally, we set values for "relational" attributes. We need to
# do this at the end in order to get the create operations in
# the correct order, as the newly created attributes might
# contain references to the newly created entity.
attribute.set_local_value(
self, value
)
def _reconstruct(self, data):
'''Reconstruct from *data*.'''
# Data represents remote values.
for key, value in list(data.items()):
if key in self._ignore_data_keys:
continue
attribute = self.__class__.attributes.get(key)
if attribute is None:
self.logger.debug(L(
'Cannot populate {0!r} attribute as no such attribute '
'found on entity {1!r}.', key, self
))
continue
attribute.set_remote_value(self, value)
def __repr__(self):
'''Return representation of instance.'''
return '<dynamic ftrack {0} object {1}>'.format(
self.__class__.__name__, id(self)
)
def __str__(self):
'''Return string representation of instance.'''
with self.session.auto_populating(False):
primary_key = ['Unknown']
try:
primary_key = list(ftrack_api.inspection.primary_key(self).values())
except KeyError:
pass
return '<{0}({1})>'.format(
self.__class__.__name__, ', '.join(primary_key)
)
def __hash__(self):
'''Return hash representing instance.'''
return hash(str(ftrack_api.inspection.identity(self)))
def __eq__(self, other):
'''Return whether *other* is equal to this instance.
.. note::
Equality is determined by both instances having the same identity.
Values of attributes are not considered.
'''
try:
return (
ftrack_api.inspection.identity(other)
== ftrack_api.inspection.identity(self)
)
except (AttributeError, KeyError):
return False
def __getitem__(self, key):
'''Return attribute value for *key*.'''
attribute = self.__class__.attributes.get(key)
if attribute is None:
raise KeyError(key)
return attribute.get_value(self)
def __setitem__(self, key, value):
'''Set attribute *value* for *key*.'''
attribute = self.__class__.attributes.get(key)
if attribute is None:
raise KeyError(key)
attribute.set_local_value(self, value)
def __delitem__(self, key):
'''Clear attribute value for *key*.
.. note::
Will not remove the attribute, but instead clear any local value
and revert to the last known server value.
'''
attribute = self.__class__.attributes.get(key)
attribute.set_local_value(self, ftrack_api.symbol.NOT_SET)
def __iter__(self):
'''Iterate over all attributes keys.'''
for attribute in self.__class__.attributes:
yield attribute.name
def __len__(self):
'''Return count of attributes.'''
return len(self.__class__.attributes)
def values(self):
'''Return list of values.'''
if self.session.auto_populate:
self._populate_unset_scalar_attributes()
return list(super(Entity, self).values())
def items(self):
'''Return list of tuples of (key, value) pairs.
.. note::
Will fetch all values from the server if not already fetched or set
locally.
'''
if self.session.auto_populate:
self._populate_unset_scalar_attributes()
return list(super(Entity, self).items())
def clear(self):
'''Reset all locally modified attribute values.'''
for attribute in self:
del self[attribute]
def merge(self, entity, merged=None):
'''Merge *entity* attribute values and other data into this entity.
Only merge values from *entity* that are not
:attr:`ftrack_api.symbol.NOT_SET`.
Return a list of changes made with each change being a mapping with
the keys:
* type - Either 'remote_attribute', 'local_attribute' or 'property'.
* name - The name of the attribute / property modified.
* old_value - The previous value.
* new_value - The new merged value.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
log_message = 'Merged {type} "{name}": {old_value!r} -> {new_value!r}'
changes = []
# Attributes.
# Prioritise by type so that scalar values are set first. This should
# guarantee that the attributes making up the identity of the entity
# are merged before merging any collections that may have references to
# this entity.
attributes = collections.deque()
for attribute in entity.attributes:
if isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
attributes.appendleft(attribute)
else:
attributes.append(attribute)
for other_attribute in attributes:
attribute = self.attributes.get(other_attribute.name)
# Local attributes.
other_local_value = other_attribute.get_local_value(entity)
if other_local_value is not ftrack_api.symbol.NOT_SET:
local_value = attribute.get_local_value(self)
if local_value != other_local_value:
merged_local_value = self.session.merge(
other_local_value, merged=merged
)
attribute.set_local_value(self, merged_local_value)
changes.append({
'type': 'local_attribute',
'name': attribute.name,
'old_value': local_value,
'new_value': merged_local_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
# Remote attributes.
other_remote_value = other_attribute.get_remote_value(entity)
if other_remote_value is not ftrack_api.symbol.NOT_SET:
remote_value = attribute.get_remote_value(self)
if remote_value != other_remote_value:
merged_remote_value = self.session.merge(
other_remote_value, merged=merged
)
attribute.set_remote_value(
self, merged_remote_value
)
changes.append({
'type': 'remote_attribute',
'name': attribute.name,
'old_value': remote_value,
'new_value': merged_remote_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
# We need to handle collections separately since
# they may store a local copy of the remote attribute
# even though it may not be modified.
if not isinstance(
attribute, ftrack_api.attribute.AbstractCollectionAttribute
):
continue
local_value = attribute.get_local_value(
self
)
# Populated but not modified, update it.
if (
local_value is not ftrack_api.symbol.NOT_SET and
local_value == remote_value
):
attribute.set_local_value(
self, merged_remote_value
)
changes.append({
'type': 'local_attribute',
'name': attribute.name,
'old_value': local_value,
'new_value': merged_remote_value
})
log_debug and self.logger.debug(
log_message.format(**changes[-1])
)
return changes
def _populate_unset_scalar_attributes(self):
'''Populate all unset scalar attributes in one query.'''
projections = []
for attribute in self.attributes:
if isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
if attribute.get_remote_value(self) is ftrack_api.symbol.NOT_SET:
projections.append(attribute.name)
if projections:
self.session.populate([self], ', '.join(projections))
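# Illustrative sketch of the mapping interface entities expose (assumes a
# connected session; ``task_id`` is a placeholder):
#
#     >>> task = session.get('Task', task_id)
#     >>> task['name'] = 'New name'   # Set local value.
#     >>> del task['name']            # Revert to last known server value.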

View file

@ -0,0 +1,75 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import object
import ftrack_api.entity.base
class Component(ftrack_api.entity.base.Entity):
'''Represent a component.'''
def get_availability(self, locations=None):
'''Return availability in *locations*.
If *locations* is None, all known locations will be checked.
Return a dictionary of {location_id:percentage_availability}
'''
return self.session.get_component_availability(
self, locations=locations
)
class CreateThumbnailMixin(object):
'''Mixin to add create_thumbnail method on entity class.'''
def create_thumbnail(self, path, data=None):
'''Set entity thumbnail from *path*.
Create a thumbnail component in the ftrack.server location using
:meth:`Session.create_component
<ftrack_api.session.Session.create_component>`. The thumbnail component
will be created using *data* if specified. If no component name is
given, `thumbnail` will be used.
The file is expected to be of an appropriate size and valid file
type.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` will be
automatically issued.
'''
if data is None:
data = {}
if not data.get('name'):
data['name'] = 'thumbnail'
thumbnail_component = self.session.create_component(
path, data, location=None
)
origin_location = self.session.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
server_location = self.session.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
server_location.add_component(thumbnail_component, [origin_location])
# TODO: This commit can be avoided by reordering the operations in
# this method so that the component is transferred to ftrack.server
# after the thumbnail has been set.
#
# There is currently a bug in the API backend, causing the operations
# to *some* times be ordered wrongly, where the update occurs before
# the component has been created, causing an integrity error.
#
# Once this issue has been resolved, this commit can be removed and the
# update placed between component creation and registration.
self['thumbnail_id'] = thumbnail_component['id']
self.session.commit()
return thumbnail_component
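# Illustrative sketch (assumes a connected session and that ``task_id``
# refers to an existing entity whose class includes this mixin):
#
#     >>> task = session.get('Task', task_id)
#     >>> thumbnail_component = task.create_thumbnail('/path/to/image.jpg')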

View file

@ -0,0 +1,443 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
from builtins import str
from builtins import object
import logging
import uuid
import functools
import ftrack_api.attribute
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.entity.component
import ftrack_api.entity.asset_version
import ftrack_api.entity.project_schema
import ftrack_api.entity.note
import ftrack_api.entity.job
import ftrack_api.entity.user
import ftrack_api.symbol
import ftrack_api.cache
from ftrack_api.logging import LazyLogMessage as L
class Factory(object):
'''Entity class factory.'''
def __init__(self):
'''Initialise factory.'''
super(Factory, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.
*bases* should be a list of bases to give the constructed class. If not
specified, default to :class:`ftrack_api.entity.base.Entity`.
'''
entity_type = schema['id']
class_name = entity_type
class_bases = bases
if class_bases is None:
class_bases = [ftrack_api.entity.base.Entity]
class_namespace = dict()
# Build attributes for class.
attributes = ftrack_api.attribute.Attributes()
immutable_properties = schema.get('immutable', [])
computed_properties = schema.get('computed', [])
for name, fragment in list(schema.get('properties', {}).items()):
mutable = name not in immutable_properties
computed = name in computed_properties
default = fragment.get('default', ftrack_api.symbol.NOT_SET)
if default == '{uid}':
default = lambda instance: str(uuid.uuid4())
data_type = fragment.get('type', ftrack_api.symbol.NOT_SET)
if data_type is not ftrack_api.symbol.NOT_SET:
if data_type in (
'string', 'boolean', 'integer', 'number', 'variable',
'object'
):
# Basic scalar attribute.
if data_type == 'number':
data_type = 'float'
if data_type == 'string':
data_format = fragment.get('format')
if data_format == 'date-time':
data_type = 'datetime'
attribute = self.create_scalar_attribute(
class_name, name, mutable, computed, default, data_type
)
if attribute:
attributes.add(attribute)
elif data_type == 'array':
attribute = self.create_collection_attribute(
class_name, name, mutable
)
if attribute:
attributes.add(attribute)
elif data_type == 'mapped_array':
reference = fragment.get('items', {}).get('$ref')
if not reference:
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that does '
'not define a schema reference.', class_name, name
))
continue
attribute = self.create_mapped_collection_attribute(
class_name, name, mutable, reference
)
if attribute:
attributes.add(attribute)
else:
self.logger.debug(L(
'Skipping {0}.{1} attribute with unrecognised data '
'type {2}', class_name, name, data_type
))
else:
# Reference attribute.
reference = fragment.get('$ref', ftrack_api.symbol.NOT_SET)
if reference is ftrack_api.symbol.NOT_SET:
self.logger.debug(L(
'Skipping {0}.{1} reference attribute that does '
'not define a schema reference.', class_name, name
))
continue
attribute = self.create_reference_attribute(
class_name, name, mutable, reference
)
if attribute:
attributes.add(attribute)
default_projections = schema.get('default_projections', [])
# Construct class.
class_namespace['entity_type'] = entity_type
class_namespace['attributes'] = attributes
class_namespace['primary_key_attributes'] = schema['primary_key'][:]
class_namespace['default_projections'] = default_projections
from future.utils import (
native_str
)
cls = type(
native_str(class_name), # type doesn't accept unicode.
tuple(class_bases),
class_namespace
)
return cls
def create_scalar_attribute(
self, class_name, name, mutable, computed, default, data_type
):
'''Return appropriate scalar attribute instance.'''
return ftrack_api.attribute.ScalarAttribute(
name, data_type=data_type, default_value=default, mutable=mutable,
computed=computed
)
def create_reference_attribute(self, class_name, name, mutable, reference):
'''Return appropriate reference attribute instance.'''
return ftrack_api.attribute.ReferenceAttribute(
name, reference, mutable=mutable
)
def create_collection_attribute(self, class_name, name, mutable):
'''Return appropriate collection attribute instance.'''
return ftrack_api.attribute.CollectionAttribute(
name, mutable=mutable
)
def create_mapped_collection_attribute(
self, class_name, name, mutable, reference
):
'''Return appropriate mapped collection attribute instance.'''
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that has '
'no implementation defined for reference {2}.',
class_name, name, reference
))
class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker):
'''Generate key for defaults.'''
def _key(self, obj):
'''Return key for *obj*.'''
if isinstance(obj, dict):
entity = obj.get('entity')
if entity is not None:
# Key by session only.
return str(id(entity.session))
return str(obj)
#: Memoiser for use with default callables that should only be called once per
# session.
memoise_defaults = ftrack_api.cache.memoise_decorator(
ftrack_api.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
#: Memoiser for use with callables that should be called once per session.
memoise_session = ftrack_api.cache.memoise_decorator(
ftrack_api.cache.Memoiser(
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
)
)
@memoise_session
def _get_custom_attribute_configurations(session):
'''Return list of custom attribute configurations.
The configuration objects will have key, project_id, id and object_type_id
populated.
'''
return session.query(
'select key, project_id, id, object_type_id, entity_type, '
'is_hierarchical from CustomAttributeConfiguration'
).all()
def _get_entity_configurations(entity):
'''Return all configurations for current collection entity.'''
entity_type = None
project_id = None
object_type_id = None
if 'object_type_id' in entity.keys():
project_id = entity['project_id']
entity_type = 'task'
object_type_id = entity['object_type_id']
if entity.entity_type == 'AssetVersion':
project_id = entity['asset']['parent']['project_id']
entity_type = 'assetversion'
if entity.entity_type == 'Project':
project_id = entity['id']
entity_type = 'show'
if entity.entity_type == 'User':
entity_type = 'user'
if entity.entity_type == 'Asset':
entity_type = 'asset'
if entity.entity_type in ('TypedContextList', 'AssetVersionList'):
entity_type = 'list'
if entity_type is None:
raise ValueError(
'Entity {!r} not supported.'.format(entity)
)
configurations = []
for configuration in _get_custom_attribute_configurations(
entity.session
):
if (
configuration['entity_type'] == entity_type and
configuration['project_id'] in (project_id, None) and
configuration['object_type_id'] == object_type_id
):
# The custom attribute configuration is for the target entity type.
configurations.append(configuration)
elif (
entity_type in ('asset', 'assetversion', 'show', 'task') and
configuration['project_id'] in (project_id, None) and
configuration['is_hierarchical']
):
# The target entity type allows hierarchical attributes.
configurations.append(configuration)
# Return with global configurations at the end of the list. This is done
# so that global configurations are shadowed by project specific ones
# when the configurations list is iterated while looking for a matching
# `key`.
return sorted(
configurations, key=lambda item: item['project_id'] is None
)
class StandardFactory(Factory):
'''Standard entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
if not bases:
bases = []
extra_bases = []
# Customise classes.
if schema['id'] == 'ProjectSchema':
extra_bases = [ftrack_api.entity.project_schema.ProjectSchema]
elif schema['id'] == 'Location':
extra_bases = [ftrack_api.entity.location.Location]
elif schema['id'] == 'AssetVersion':
extra_bases = [ftrack_api.entity.asset_version.AssetVersion]
elif schema['id'].endswith('Component'):
extra_bases = [ftrack_api.entity.component.Component]
elif schema['id'] == 'Note':
extra_bases = [ftrack_api.entity.note.Note]
elif schema['id'] == 'Job':
extra_bases = [ftrack_api.entity.job.Job]
elif schema['id'] == 'User':
extra_bases = [ftrack_api.entity.user.User]
bases = extra_bases + bases
# If bases does not contain any items, add the base entity class.
if not bases:
bases = [ftrack_api.entity.base.Entity]
# Add mixins.
if 'notes' in schema.get('properties', {}):
bases.append(
ftrack_api.entity.note.CreateNoteMixin
)
if 'thumbnail_id' in schema.get('properties', {}):
bases.append(
ftrack_api.entity.component.CreateThumbnailMixin
)
cls = super(StandardFactory, self).create(schema, bases=bases)
return cls
def create_mapped_collection_attribute(
self, class_name, name, mutable, reference
):
'''Return appropriate mapped collection attribute instance.'''
if reference == 'Metadata':
def create_metadata(proxy, data, reference):
'''Return metadata for *data*.'''
entity = proxy.collection.entity
session = entity.session
data.update({
'parent_id': entity['id'],
'parent_type': entity.entity_type
})
return session.create(reference, data)
creator = functools.partial(
create_metadata, reference=reference
)
key_attribute = 'key'
value_attribute = 'value'
return ftrack_api.attribute.KeyValueMappedCollectionAttribute(
name, creator, key_attribute, value_attribute, mutable=mutable
)
elif reference == 'CustomAttributeValue':
return (
ftrack_api.attribute.CustomAttributeCollectionAttribute(
name, mutable=mutable
)
)
elif reference.endswith('CustomAttributeValue'):
def creator(proxy, data):
'''Create a custom attribute based on *proxy* and *data*.
Raise :py:exc:`KeyError` if the related entity is already persisted
to the server. The proxy represents dense custom attribute
values and should never create new custom attribute values
through the proxy if the entity exists on the remote.
If the entity is not persisted, the usual
<entity_type>CustomAttributeValue items cannot be updated since
the related entity does not exist on the remote and the values are
not in the proxy. Instead a <entity_type>CustomAttributeValue will
be reconstructed and an update operation will be recorded.
'''
entity = proxy.collection.entity
if (
ftrack_api.inspection.state(entity) is not
ftrack_api.symbol.CREATED
):
raise KeyError(
'Custom attributes must be created explicitly for the '
'given entity type before being set.'
)
configuration = None
for candidate in _get_entity_configurations(entity):
if candidate['key'] == data['key']:
configuration = candidate
break
if configuration is None:
raise ValueError(
u'No valid custom attribute for data {0!r} was found.'
.format(data)
)
create_data = dict(list(data.items()))
create_data['configuration_id'] = configuration['id']
create_data['entity_id'] = entity['id']
session = entity.session
# Create the custom attribute by reconstructing it and then update
# the value. This prevents a create operation from being sent to the
# remote, as create operations are not allowed for this entity type.
# Instead an update operation will be recorded.
value = create_data.pop('value')
item = session.create(
reference,
create_data,
reconstructing=True
)
# Record update operation.
item['value'] = value
return item
key_attribute = 'key'
value_attribute = 'value'
return ftrack_api.attribute.KeyValueMappedCollectionAttribute(
name, creator, key_attribute, value_attribute, mutable=mutable
)
self.logger.debug(L(
'Skipping {0}.{1} mapped_array attribute that has no configuration '
'for reference {2}.', class_name, name, reference
))
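
A minimal sketch of how the factory above turns a schema fragment into an entity class. The schema dict below is a hand-written assumption shaped like the server schemas the session normally feeds to create(); real schemas are fetched from the server.

schema = {
    'id': 'Foo',
    'primary_key': ['id'],
    'properties': {
        'id': {'type': 'string', 'default': '{uid}'},
        'name': {'type': 'string'}
    }
}
factory = Factory()
Foo = factory.create(schema)
assert Foo.entity_type == 'Foo'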

View file

@ -0,0 +1,48 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api.entity.base
class Job(ftrack_api.entity.base.Entity):
'''Represent job.'''
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
To set a job `description` visible in the web interface, *data* can
contain a key called `data` which should be a JSON serialised
dictionary containing description::
data = {
'status': 'running',
'data': json.dumps(dict(description='My job description.')),
...
}
Will raise a :py:exc:`ValueError` if *data* contains a `type` key set
to anything other than "api_job".
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
if not reconstructing:
if data.get('type') not in ('api_job', None):
raise ValueError(
'Invalid job type "{0}". Must be "api_job"'.format(
data.get('type')
)
)
super(Job, self).__init__(
session, data=data, reconstructing=reconstructing
)
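
A usage sketch matching the docstring above; `session` and `user` are assumptions (a connected Session and an existing User entity).

import json

job = session.create('Job', {
    'user': user,
    'status': 'running',
    'data': json.dumps({'description': 'Transcoding review media.'})
})
session.commit()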

View file

@ -0,0 +1,745 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import zip
from six import string_types
from builtins import object
import collections
import functools
import ftrack_api.entity.base
import ftrack_api.exception
import ftrack_api.event.base
import ftrack_api.symbol
import ftrack_api.inspection
from ftrack_api.logging import LazyLogMessage as L
from future.utils import with_metaclass
MixinBaseClass = with_metaclass(
ftrack_api.entity.base.DynamicEntityTypeMetaclass,
ftrack_api.entity.base._EntityBase,
collections.MutableMapping
)
class Location(ftrack_api.entity.base.Entity):
'''Represent storage for components.'''
def __init__(self, session, data=None, reconstructing=False):
'''Initialise entity.
*session* is an instance of :class:`ftrack_api.session.Session` that
this entity instance is bound to.
*data* is a mapping of key, value pairs to apply as initial attribute
values.
*reconstructing* indicates whether this entity is being reconstructed,
such as from a query, and therefore should not have any special creation
logic applied, such as initialising defaults for missing data.
'''
self.accessor = ftrack_api.symbol.NOT_SET
self.structure = ftrack_api.symbol.NOT_SET
self.resource_identifier_transformer = ftrack_api.symbol.NOT_SET
self.priority = 95
super(Location, self).__init__(
session, data=data, reconstructing=reconstructing
)
def __str__(self):
'''Return string representation of instance.'''
representation = super(Location, self).__str__()
with self.session.auto_populating(False):
name = self['name']
if name is not ftrack_api.symbol.NOT_SET:
representation = representation.replace(
'(', '("{0}", '.format(name)
)
return representation
def add_component(self, component, source, recursive=True):
'''Add *component* to location.
*component* should be a single component instance.
*source* should be an instance of another location that acts as the
source.
Raise :exc:`ftrack_api.exception.ComponentInLocationError` if the
*component* already exists in this location.
Raise :exc:`ftrack_api.exception.LocationError` if managing data and the
generated
target structure for the component already exists according to the
accessor. This helps prevent potential data loss by avoiding overwriting
existing data. Note that there is a race condition between the check and
the write so if another process creates data at the same target during
that period it will be overwritten.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the component registration.
'''
return self.add_components(
[component], sources=source, recursive=recursive
)
def add_components(self, components, sources, recursive=True, _depth=0):
'''Add *components* to location.
*components* should be a list of component instances.
*sources* may be either a single source or a list of sources. If a list
then each corresponding index in *sources* will be used for each
*component*. A source should be an instance of another location.
Raise :exc:`ftrack_api.exception.ComponentInLocationError` if any
component in *components* already exists in this location. In this case,
no changes will be made and no data transferred.
Raise :exc:`ftrack_api.exception.LocationError` if managing data and the
generated target structure for the component already exists according to
the accessor. This helps prevent potential data loss by avoiding
overwriting existing data. Note that there is a race condition between
the check and the write so if another process creates data at the same
target during that period it will be overwritten.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration.
.. important::
If this location manages data then the *components* data is first
transferred to the target prescribed by the structure plugin, using
the configured accessor. If any component fails to transfer then
:exc:`ftrack_api.exception.LocationError` is raised and none of the
components are registered with the database. In this case it is left
up to the caller to decide and act on manually cleaning up any
transferred data using the 'transferred' detail in the raised error.
Likewise, after transfer, all components are registered with the
database in a batch call. If any component causes an error then all
components will remain unregistered and
:exc:`ftrack_api.exception.LocationError` will be raised detailing
issues and any transferred data under the 'transferred' detail key.
'''
if (
isinstance(sources, string_types)
or not isinstance(sources, collections.Sequence)
):
sources = [sources]
sources_count = len(sources)
if sources_count not in (1, len(components)):
raise ValueError(
'sources must be either a single source or a sequence of '
'sources with indexes corresponding to passed components.'
)
if not self.structure:
raise ftrack_api.exception.LocationError(
'No structure defined for location {location}.',
details=dict(location=self)
)
if not components:
# Optimisation: Return early when no components to process, such as
# when called recursively on an empty sequence component.
return
indent = ' ' * (_depth + 1)
# Check that components not already added to location.
existing_components = []
try:
self.get_resource_identifiers(components)
except ftrack_api.exception.ComponentNotInLocationError as error:
missing_component_ids = [
missing_component['id']
for missing_component in error.details['components']
]
for component in components:
if component['id'] not in missing_component_ids:
existing_components.append(component)
else:
existing_components.extend(components)
if existing_components:
# Some of the components already present in location.
raise ftrack_api.exception.ComponentInLocationError(
existing_components, self
)
# Attempt to transfer each component's data to this location.
transferred = []
for index, component in enumerate(components):
try:
# Determine appropriate source.
if sources_count == 1:
source = sources[0]
else:
source = sources[index]
# Add members first for container components.
is_container = 'members' in list(component.keys())
if is_container and recursive:
self.add_components(
component['members'], source, recursive=recursive,
_depth=(_depth + 1)
)
# Add component to this location.
context = self._get_context(component, source)
resource_identifier = self.structure.get_resource_identifier(
component, context
)
# Manage data transfer.
self._add_data(component, resource_identifier, source)
except Exception as error:
raise ftrack_api.exception.LocationError(
'Failed to transfer component {component} data to location '
'{location} due to error:\n{indent}{error}\n{indent}'
'Transferred component data that may require cleanup: '
'{transferred}',
details=dict(
indent=indent,
component=component,
location=self,
error=error,
transferred=transferred
)
)
else:
transferred.append((component, resource_identifier))
# Register all successfully transferred components.
components_to_register = []
component_resource_identifiers = []
try:
for component, resource_identifier in transferred:
if self.resource_identifier_transformer:
# Optionally encode resource identifier before storing.
resource_identifier = (
self.resource_identifier_transformer.encode(
resource_identifier,
context={'component': component}
)
)
components_to_register.append(component)
component_resource_identifiers.append(resource_identifier)
# Store component in location information.
self._register_components_in_location(
components_to_register, component_resource_identifiers
)
except Exception as error:
raise ftrack_api.exception.LocationError(
'Failed to register components with location {location} due to '
'error:\n{indent}{error}\n{indent}Transferred component data '
'that may require cleanup: {transferred}',
details=dict(
indent=indent,
location=self,
error=error,
transferred=transferred
)
)
# Publish events.
for component in components_to_register:
component_id = list(ftrack_api.inspection.primary_key(
component
).values())[0]
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
self.session.event_hub.publish(
ftrack_api.event.base.Event(
topic=ftrack_api.symbol.COMPONENT_ADDED_TO_LOCATION_TOPIC,
data=dict(
component_id=component_id,
location_id=location_id
),
),
on_error='ignore'
)
def _get_context(self, component, source):
'''Return context for *component* and *source*.'''
context = {}
if source:
try:
source_resource_identifier = source.get_resource_identifier(
component
)
except ftrack_api.exception.ComponentNotInLocationError:
pass
else:
context.update(dict(
source_resource_identifier=source_resource_identifier
))
return context
def _add_data(self, component, resource_identifier, source):
'''Manage transfer of *component* data from *source*.
*resource_identifier* specifies the identifier to use with this
location's accessor.
'''
self.logger.debug(L(
'Adding data for component {0!r} from source {1!r} to location '
'{2!r} using resource identifier {3!r}.',
component, resource_identifier, source, self
))
# Read data from source and write to this location.
if not source.accessor:
raise ftrack_api.exception.LocationError(
'No accessor defined for source location {location}.',
details=dict(location=source)
)
if not self.accessor:
raise ftrack_api.exception.LocationError(
'No accessor defined for target location {location}.',
details=dict(location=self)
)
is_container = 'members' in list(component.keys())
if is_container:
# TODO: Improve this check. Possibly introduce an inspection
# such as ftrack_api.inspection.is_sequence_component.
if component.entity_type != 'SequenceComponent':
self.accessor.make_container(resource_identifier)
else:
# Try to make container of component.
try:
container = self.accessor.get_container(
resource_identifier
)
except ftrack_api.exception.AccessorParentResourceNotFoundError:
# Container could not be retrieved from
# resource_identifier. Assume that there is no need to
# make the container.
pass
else:
# No need for existence check as make_container does not
# recreate existing containers.
self.accessor.make_container(container)
if self.accessor.exists(resource_identifier):
# Note: There is a race condition here in that the
# data may be added externally between the check for
# existence and the actual write which would still
# result in potential data loss. However, there is no
# good cross platform, cross accessor solution for this
# at present.
raise ftrack_api.exception.LocationError(
'Cannot add component as data already exists and '
'overwriting could result in data loss. Computed '
'target resource identifier was: {0}'
.format(resource_identifier)
)
# Read and write data.
source_data = source.accessor.open(
source.get_resource_identifier(component), 'rb'
)
target_data = self.accessor.open(resource_identifier, 'wb')
# Read/write data in chunks to avoid reading all into memory at the
# same time.
chunked_read = functools.partial(
source_data.read, ftrack_api.symbol.CHUNK_SIZE
)
for chunk in iter(chunked_read, b''):
target_data.write(chunk)
target_data.close()
source_data.close()
def _register_component_in_location(self, component, resource_identifier):
'''Register *component* in location against *resource_identifier*.'''
return self._register_components_in_location(
[component], [resource_identifier]
)
def _register_components_in_location(
self, components, resource_identifiers
):
'''Register *components* in location against *resource_identifiers*.
Indices of *components* and *resource_identifiers* should align.
'''
for component, resource_identifier in zip(
components, resource_identifiers
):
self.session.create(
'ComponentLocation', data=dict(
component=component,
location=self,
resource_identifier=resource_identifier
)
)
self.session.commit()
def remove_component(self, component, recursive=True):
'''Remove *component* from location.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the component deregistration.
'''
return self.remove_components([component], recursive=recursive)
def remove_components(self, components, recursive=True):
'''Remove *components* from location.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components deregistration.
'''
for component in components:
# Check component is in this location
self.get_resource_identifier(component)
# Remove members first for container components.
is_container = 'members' in list(component.keys())
if is_container and recursive:
self.remove_components(
component['members'], recursive=recursive
)
# Remove data.
self._remove_data(component)
# Remove metadata.
self._deregister_component_in_location(component)
# Emit event.
component_id = list(ftrack_api.inspection.primary_key(
component
).values())[0]
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
self.session.event_hub.publish(
ftrack_api.event.base.Event(
topic=ftrack_api.symbol.COMPONENT_REMOVED_FROM_LOCATION_TOPIC,
data=dict(
component_id=component_id,
location_id=location_id
)
),
on_error='ignore'
)
def _remove_data(self, component):
'''Remove data associated with *component*.'''
if not self.accessor:
raise ftrack_api.exception.LocationError(
'No accessor defined for location {location}.',
details=dict(location=self)
)
try:
self.accessor.remove(
self.get_resource_identifier(component)
)
except ftrack_api.exception.AccessorResourceNotFoundError:
# If accessor does not support detecting sequence paths then an
# AccessorResourceNotFoundError is raised. For now, if the
# component type is 'SequenceComponent' assume success.
if component.entity_type != 'SequenceComponent':
raise
def _deregister_component_in_location(self, component):
'''Deregister *component* from location.'''
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
# TODO: Use session.get for optimisation.
component_location = self.session.query(
'ComponentLocation where component_id is {0} and location_id is '
'{1}'.format(component_id, location_id)
)[0]
self.session.delete(component_location)
# TODO: Should auto-commit here be optional?
self.session.commit()
def get_component_availability(self, component):
'''Return availability of *component* in this location as a float.'''
return self.session.get_component_availability(
component, locations=[self]
)[self['id']]
def get_component_availabilities(self, components):
'''Return availabilities of *components* in this location.
Return list of float values corresponding to each component.
'''
return [
availability[self['id']] for availability in
self.session.get_component_availabilities(
components, locations=[self]
)
]
def get_resource_identifier(self, component):
'''Return resource identifier for *component*.
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if the
component is not present in this location.
'''
return self.get_resource_identifiers([component])[0]
def get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
of the components are not present in this location.
'''
resource_identifiers = self._get_resource_identifiers(components)
# Optionally decode resource identifier.
if self.resource_identifier_transformer:
for index, resource_identifier in enumerate(resource_identifiers):
resource_identifiers[index] = (
self.resource_identifier_transformer.decode(
resource_identifier,
context={'component': components[index]}
)
)
return resource_identifiers
def _get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
of the components are not present in this location.
'''
component_ids_mapping = collections.OrderedDict()
for component in components:
component_id = list(ftrack_api.inspection.primary_key(
component
).values())[0]
component_ids_mapping[component_id] = component
component_locations = self.session.query(
'select component_id, resource_identifier from ComponentLocation '
'where location_id is {0} and component_id in ({1})'
.format(
list(ftrack_api.inspection.primary_key(self).values())[0],
', '.join(list(component_ids_mapping.keys()))
)
)
resource_identifiers_map = {}
for component_location in component_locations:
resource_identifiers_map[component_location['component_id']] = (
component_location['resource_identifier']
)
resource_identifiers = []
missing = []
for component_id, component in list(component_ids_mapping.items()):
if component_id not in resource_identifiers_map:
missing.append(component)
else:
resource_identifiers.append(
resource_identifiers_map[component_id]
)
if missing:
raise ftrack_api.exception.ComponentNotInLocationError(
missing, self
)
return resource_identifiers
def get_filesystem_path(self, component):
'''Return filesystem path for *component*.'''
return self.get_filesystem_paths([component])[0]
def get_filesystem_paths(self, components):
'''Return filesystem paths for *components*.'''
resource_identifiers = self.get_resource_identifiers(components)
filesystem_paths = []
for resource_identifier in resource_identifiers:
filesystem_paths.append(
self.accessor.get_filesystem_path(resource_identifier)
)
return filesystem_paths
def get_url(self, component):
'''Return url for *component*.
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
URL could not be determined from *component* or
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by the location's accessor.
'''
resource_identifier = self.get_resource_identifier(component)
return self.accessor.get_url(resource_identifier)
class MemoryLocationMixin(MixinBaseClass):
'''Represent storage for components.
Unlike a standard location, metadata for components in this location is
stored only in memory rather than persisted to the database.
'''
@property
def _cache(self):
'''Return cache.'''
try:
cache = self.__cache
except AttributeError:
cache = self.__cache = {}
return cache
def _register_component_in_location(self, component, resource_identifier):
'''Register *component* in location with *resource_identifier*.'''
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
self._cache[component_id] = resource_identifier
def _register_components_in_location(
self, components, resource_identifiers
):
'''Register *components* in location against *resource_identifiers*.
Indices of *components* and *resource_identifiers* should align.
'''
for component, resource_identifier in zip(
components, resource_identifiers
):
self._register_component_in_location(component, resource_identifier)
def _deregister_component_in_location(self, component):
'''Deregister *component* in location.'''
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
self._cache.pop(component_id)
def _get_resource_identifiers(self, components):
'''Return resource identifiers for *components*.
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
of the referenced components are not present in this location.
'''
resource_identifiers = []
missing = []
for component in components:
component_id = list(ftrack_api.inspection.primary_key(
component
).values())[0]
resource_identifier = self._cache.get(component_id)
if resource_identifier is None:
missing.append(component)
else:
resource_identifiers.append(resource_identifier)
if missing:
raise ftrack_api.exception.ComponentNotInLocationError(
missing, self
)
return resource_identifiers
class UnmanagedLocationMixin(MixinBaseClass):
'''Location that does not manage data.'''
def _add_data(self, component, resource_identifier, source):
'''Manage transfer of *component* data from *source*.
*resource_identifier* specifies the identifier to use with this
location's accessor.
Overridden to have no effect.
'''
return
def _remove_data(self, component):
'''Remove data associated with *component*.
Overridden to have no effect.
'''
return
class OriginLocationMixin(MemoryLocationMixin, UnmanagedLocationMixin):
'''Special origin location that expects sources as filepaths.'''
def _get_context(self, component, source):
'''Return context for *component* and *source*.'''
context = {}
if source:
context.update(dict(
source_resource_identifier=source
))
return context
class ServerLocationMixin(MixinBaseClass):
'''Location representing ftrack server.
Adds convenience methods to location, specific to ftrack server.
'''
def get_thumbnail_url(self, component, size=None):
'''Return thumbnail url for *component*.
Optionally, specify *size* to constrain the downscaled image to size
x size pixels.
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
URL could not be determined from *resource_identifier* or
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
retrieving URL is not supported by the location's accessor.
'''
resource_identifier = self.get_resource_identifier(component)
return self.accessor.get_thumbnail_url(resource_identifier, size)
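
A sketch of registering a component through the add_component API above. The location name and file path are assumptions; `session` is a connected Session.

import ftrack_api
import ftrack_api.symbol

session = ftrack_api.Session()
origin = session.get('Location', ftrack_api.symbol.ORIGIN_LOCATION_ID)
target = session.query('Location where name is "studio.disk"').one()
# Create a component without auto-adding it to a location, then transfer
# its data from the origin location to the target location.
component = session.create_component('/tmp/render.mov', location=None)
target.add_component(component, origin)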

View file

@ -0,0 +1,105 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import warnings
from builtins import object
import ftrack_api.entity.base
class Note(ftrack_api.entity.base.Entity):
'''Represent a note.'''
def create_reply(
self, content, author
):
'''Create a reply with *content* and *author*.
.. note::
This is a helper method. To create replies manually use the
standard :meth:`Session.create` method.
'''
reply = self.session.create(
'Note', {
'author': author,
'content': content
}
)
self['replies'].append(reply)
return reply
class CreateNoteMixin(object):
'''Mixin to add create_note method on entity class.'''
def create_note(
self, content, author, recipients=None, category=None, labels=None
):
'''Create note with *content* and *author*.
NoteLabels can be set by including *labels*.
Note category can be set by including *category*.
*recipients* can be specified as a list of user or group instances.
'''
note_label_support = 'NoteLabel' in self.session.types
if not labels:
labels = []
if labels and not note_label_support:
raise ValueError(
'NoteLabel is not supported by the current server version.'
)
if category and labels:
raise ValueError(
'Both category and labels cannot be set at the same time.'
)
if not recipients:
recipients = []
data = {
'content': content,
'author': author
}
if category:
if note_label_support:
labels = [category]
warnings.warn(
'category argument will be removed in an upcoming version, '
'please use labels instead.',
PendingDeprecationWarning
)
else:
data['category_id'] = category['id']
note = self.session.create('Note', data)
self['notes'].append(note)
for resource in recipients:
recipient = self.session.create('Recipient', {
'note_id': note['id'],
'resource_id': resource['id']
})
note['recipients'].append(recipient)
for label in labels:
self.session.create(
'NoteLabelLink',
{
'label_id': label['id'],
'note_id': note['id']
}
)
return note
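
A usage sketch for the mixin above; the queries are illustrative and assume a connected `session`.

task = session.query('Task').first()
author = session.query('User').first()
note = task.create_note('Please review the latest version.', author)
session.commit()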

View file

@ -0,0 +1,94 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api.entity.base
class ProjectSchema(ftrack_api.entity.base.Entity):
'''Class representing ProjectSchema.'''
def get_statuses(self, schema, type_id=None):
'''Return statuses for *schema* and optional *type_id*.
*type_id* is the id of the Type for a TypedContext and can be used to
get statuses where the workflow has been overridden.
'''
# Task has overrides and needs to be handled separately.
if schema == 'Task':
if type_id is not None:
overrides = self['_overrides']
for override in overrides:
if override['type_id'] == type_id:
return override['workflow_schema']['statuses'][:]
return self['_task_workflow']['statuses'][:]
elif schema == 'AssetVersion':
return self['_version_workflow']['statuses'][:]
else:
try:
EntityTypeClass = self.session.types[schema]
except KeyError:
raise ValueError('Schema {0} does not exist.'.format(schema))
object_type_id_attribute = EntityTypeClass.attributes.get(
'object_type_id'
)
try:
object_type_id = object_type_id_attribute.default_value
except AttributeError:
raise ValueError(
'Schema {0} does not have statuses.'.format(schema)
)
for _schema in self['_schemas']:
if _schema['type_id'] == object_type_id:
result = self.session.query(
'select task_status from SchemaStatus '
'where schema_id is {0}'.format(_schema['id'])
)
return [
schema_type['task_status'] for schema_type in result
]
raise ValueError(
'No valid statuses were found for schema {0}.'.format(schema)
)
def get_types(self, schema):
'''Return types for *schema*.'''
# Task needs to be handled separately.
if schema == 'Task':
return self['_task_type_schema']['types'][:]
else:
try:
EntityTypeClass = self.session.types[schema]
except KeyError:
raise ValueError('Schema {0} does not exist.'.format(schema))
object_type_id_attribute = EntityTypeClass.attributes.get(
'object_type_id'
)
try:
object_type_id = object_type_id_attribute.default_value
except AttributeError:
raise ValueError(
'Schema {0} does not have types.'.format(schema)
)
for _schema in self['_schemas']:
if _schema['type_id'] == object_type_id:
result = self.session.query(
'select task_type from SchemaType '
'where schema_id is {0}'.format(_schema['id'])
)
return [schema_type['task_type'] for schema_type in result]
raise ValueError(
'No valid types were found for schema {0}.'.format(schema)
)
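
A usage sketch assuming a connected `session` with at least one project.

project = session.query('Project').first()
project_schema = project['project_schema']
# List the task statuses defined by the project's workflow schema.
for status in project_schema.get_statuses('Task'):
    print(status['name'])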

View file

@ -0,0 +1,124 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import str
import arrow
import ftrack_api.entity.base
import ftrack_api.exception
class User(ftrack_api.entity.base.Entity):
'''Represent a user.'''
def start_timer(self, context=None, comment='', name=None, force=False):
'''Start a timer for *context* and return it.
*force* can be used to automatically stop an existing timer and create a
timelog for it. If you need to get access to the created timelog, use
:func:`stop_timer` instead.
*comment* and *name* are optional but will be set on the timer.
.. note::
This method will automatically commit the changes and if *force* is
False then it will fail with a
:class:`ftrack_api.exception.NotUniqueError` exception if a
timer is already running.
'''
if force:
try:
self.stop_timer()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Failed to stop existing timer.')
timer = self.session.create('Timer', {
'user': self,
'context': context,
'name': name,
'comment': comment
})
# Commit the new timer and try to catch any error that indicates
# another timelog already exists, and inform the user about it.
try:
self.session.commit()
except ftrack_api.exception.ServerError as error:
if 'IntegrityError' in str(error):
raise ftrack_api.exception.NotUniqueError(
('Failed to start a timelog for user with id: {0}, it is '
'likely that a timer is already running. Either use '
'force=True or stop the timer first.').format(self['id'])
)
else:
# Reraise the error as it might be something unrelated.
raise
return timer
def stop_timer(self):
'''Stop the current timer and return a timelog created from it.
If a timer is not running, a
:exc:`ftrack_api.exception.NoResultFoundError` exception will be
raised.
.. note::
This method will automatically commit the changes.
'''
timer = self.session.query(
'Timer where user_id = "{0}"'.format(self['id'])
).one()
# If the server does not have timezone support enabled, the timer start
# is stored without a timezone offset, so label the local "now" as UTC
# to get the correct duration.
is_timezone_support_enabled = self.session.server_information.get(
'is_timezone_support_enabled', None
)
if is_timezone_support_enabled is None:
self.logger.warning(
'Could not identify if server has timezone support enabled. '
'Will assume server is running in UTC.'
)
is_timezone_support_enabled = True
if is_timezone_support_enabled:
now = arrow.now()
else:
now = arrow.now().replace(tzinfo='utc')
delta = now - timer['start']
duration = delta.days * 24 * 60 * 60 + delta.seconds
timelog = self.session.create('Timelog', {
'user_id': timer['user_id'],
'context_id': timer['context_id'],
'comment': timer['comment'],
'start': timer['start'],
'duration': duration,
'name': timer['name']
})
self.session.delete(timer)
self.session.commit()
return timelog
def send_invite(self):
'''Send an invitation email to the user.'''
self.session.send_user_invite(
self
)
def reset_api_key(self):
'''Reset the user's API key.'''
response = self.session.reset_remote(
'api_key', entity=self
)
return response['api_key']
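
A timer round-trip sketch assuming a connected `session`; the task query is illustrative.

user = session.query(
    'User where username is "{0}"'.format(session.api_user)
).one()
task = session.query('Task').first()
user.start_timer(task, comment='Addressing review notes.', force=True)
# ... later ...
timelog = user.stop_timer()
print(timelog['duration'])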

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,86 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import str
import uuid
import collections
class Event(collections.MutableMapping):
'''Represent a single event.'''
def __init__(self, topic, id=None, data=None, sent=None,
source=None, target='', in_reply_to_event=None):
'''Initialise event.
*topic* is the required topic for the event. It can use a dotted
notation to demarcate groupings. For example, 'ftrack.update'.
*id* is the unique id for this event instance. It is primarily used when
replying to an event. If not supplied a default uuid based value will
be used.
*data* refers to event specific data. It should be a mapping structure
and defaults to an empty dictionary if not supplied.
*sent* is the timestamp of when the event is sent. It will be set
automatically at send time unless specified here.
*source* is information about where the event originated. It should be
a mapping and include at least a unique id value under an 'id' key. If
not specified, senders usually populate the value automatically at
publish time.
*target* can be an expression that targets this event. For example,
a reply event would target the event to the sender of the source event.
The expression will be tested against subscriber information only.
*in_reply_to_event* is used when replying to an event and should contain
the unique id of the event being replied to.
'''
super(Event, self).__init__()
self._data = dict(
id=id or uuid.uuid4().hex,
data=data or {},
topic=topic,
sent=sent,
source=source or {},
target=target,
in_reply_to_event=in_reply_to_event
)
self._stopped = False
def stop(self):
'''Stop further processing of this event.'''
self._stopped = True
def is_stopped(self):
'''Return whether event has been stopped.'''
return self._stopped
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data)
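
A construction sketch for the class above; the topic name and payload are assumptions, and publishing assumes a connected session's event hub.

event = Event(
    topic='my-company.render.completed',
    data={'shot': 'sh010', 'status': 'done'}
)
# Publishing requires a connected session (assumed here):
session.event_hub.publish(event)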

View file

@ -0,0 +1,285 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import map
from six import string_types
from builtins import object
from operator import eq, ne, ge, le, gt, lt
from pyparsing import (Group, Word, CaselessKeyword, Forward,
FollowedBy, Suppress, oneOf, OneOrMore, Optional,
alphanums, quotedString, removeQuotes)
import ftrack_api.exception
# Do not enable packrat since it is not thread-safe and will result in parsing
# exceptions in a multi threaded environment.
# ParserElement.enablePackrat()
class Parser(object):
'''Parse string based expression into :class:`Expression` instance.'''
def __init__(self):
'''Initialise parser.'''
self._operators = {
'=': eq,
'!=': ne,
'>=': ge,
'<=': le,
'>': gt,
'<': lt
}
self._parser = self._construct_parser()
super(Parser, self).__init__()
def _construct_parser(self):
'''Construct and return parser.'''
field = Word(alphanums + '_.')
operator = oneOf(list(self._operators.keys()))
value = Word(alphanums + '-_,./*@+')
quoted_value = quotedString('quoted_value').setParseAction(removeQuotes)
condition = Group(
field + operator + (quoted_value | value)
)('condition')
not_ = Optional(Suppress(CaselessKeyword('not')))('not')
and_ = Suppress(CaselessKeyword('and'))('and')
or_ = Suppress(CaselessKeyword('or'))('or')
expression = Forward()
parenthesis = Suppress('(') + expression + Suppress(')')
previous = condition | parenthesis
for conjunction in (not_, and_, or_):
current = Forward()
if conjunction in (and_, or_):
conjunction_expression = (
FollowedBy(previous + conjunction + previous)
+ Group(
previous + OneOrMore(conjunction + previous)
)(conjunction.resultsName)
)
elif conjunction in (not_, ):
conjunction_expression = (
FollowedBy(conjunction.expr + current)
+ Group(conjunction + current)(conjunction.resultsName)
)
else: # pragma: no cover
raise ValueError('Unrecognised conjunction.')
current <<= (conjunction_expression | previous)
previous = current
expression <<= previous
return expression('expression')
def parse(self, expression):
'''Parse string *expression* into :class:`Expression`.
Raise :exc:`ftrack_api.exception.ParseError` if *expression* could
not be parsed.
'''
result = None
expression = expression.strip()
if expression:
try:
result = self._parser.parseString(
expression, parseAll=True
)
except Exception as error:
raise ftrack_api.exception.ParseError(
'Failed to parse: {0}. {1}'.format(expression, error)
)
return self._process(result)
def _process(self, result):
'''Process *result* using appropriate method.
Method called is determined by the name of the result.
'''
method_name = '_process_{0}'.format(result.getName())
method = getattr(self, method_name)
return method(result)
def _process_expression(self, result):
'''Process *result* as expression.'''
return self._process(result[0])
def _process_not(self, result):
'''Process *result* as NOT operation.'''
return Not(self._process(result[0]))
def _process_and(self, result):
'''Process *result* as AND operation.'''
return All([self._process(entry) for entry in result])
def _process_or(self, result):
'''Process *result* as OR operation.'''
return Any([self._process(entry) for entry in result])
def _process_condition(self, result):
'''Process *result* as condition.'''
key, operator, value = result
return Condition(key, self._operators[operator], value)
def _process_quoted_value(self, result):
'''Process *result* as quoted value.'''
return result
class Expression(object):
'''Represent a structured expression to test candidates against.'''
def __str__(self):
'''Return string representation.'''
return '<{0}>'.format(self.__class__.__name__)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return True
class All(Expression):
'''Match candidate that matches all of the specified expressions.
.. note::
If no expressions are supplied then this expression will always match.
'''
def __init__(self, expressions=None):
'''Initialise with list of *expressions* to match against.'''
self._expressions = expressions or []
super(All, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} [{1}]>'.format(
self.__class__.__name__,
' '.join(map(str, self._expressions))
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return all([
expression.match(candidate) for expression in self._expressions
])
class Any(Expression):
'''Match candidate that matches any of the specified expressions.
.. note::
If no expressions are supplied then this expression will never match.
'''
def __init__(self, expressions=None):
'''Initialise with list of *expressions* to match against.'''
self._expressions = expressions or []
super(Any, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} [{1}]>'.format(
self.__class__.__name__,
' '.join(map(str, self._expressions))
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return any([
expression.match(candidate) for expression in self._expressions
])
class Not(Expression):
'''Negate expression.'''
def __init__(self, expression):
'''Initialise with *expression* to negate.'''
self._expression = expression
super(Not, self).__init__()
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__,
self._expression
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
return not self._expression.match(candidate)
class Condition(Expression):
'''Represent condition.'''
def __init__(self, key, operator, value):
'''Initialise condition.
*key* is the key to check on the data when matching. It can be a nested
key represented by dots. For example, 'data.eventType' would attempt to
match candidate['data']['eventType']. If the candidate is missing any
of the requested keys then the match fails immediately.
*operator* is the operator function to use to perform the match between
the retrieved candidate value and the conditional *value*.
If *value* is a string it may end with a wildcard '*'. In that case,
and for equality comparisons only, the match succeeds when the
candidate value contains the substring preceding the wildcard.
'''
self._key = key
self._operator = operator
self._value = value
self._wildcard = '*'
self._operator_mapping = {
eq: '=',
ne: '!=',
ge: '>=',
le: '<=',
gt: '>',
lt: '<'
}
def __str__(self):
'''Return string representation.'''
return '<{0} {1}{2}{3}>'.format(
self.__class__.__name__,
self._key,
self._operator_mapping.get(self._operator, self._operator),
self._value
)
def match(self, candidate):
'''Return whether *candidate* satisfies this expression.'''
key_parts = self._key.split('.')
try:
value = candidate
for key_part in key_parts:
value = value[key_part]
except (KeyError, TypeError):
return False
if (
self._operator is eq
and isinstance(self._value, string_types)
and self._value[-1] == self._wildcard
):
return self._value[:-1] in value
else:
return self._operator(value, self._value)
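
The parser and expression classes above can be exercised standalone; the expression string and candidate mappings below are illustrative.

parser = Parser()
expression = parser.parse('topic=ftrack.* and data.user=jane')
assert expression.match(
    {'topic': 'ftrack.update', 'data': {'user': 'jane'}}
)
assert not expression.match({'topic': 'other.topic'})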

File diff suppressed because it is too large.

View file

@ -0,0 +1,28 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import object
import ftrack_api.event.subscription
class Subscriber(object):
'''Represent event subscriber.'''
def __init__(self, subscription, callback, metadata, priority):
'''Initialise subscriber.'''
self.subscription = ftrack_api.event.subscription.Subscription(
subscription
)
self.callback = callback
self.metadata = metadata
self.priority = priority
def __str__(self):
'''Return string representation.'''
return '<{0} metadata={1} subscription="{2}">'.format(
self.__class__.__name__, self.metadata, self.subscription
)
def interested_in(self, event):
'''Return whether subscriber interested in *event*.'''
return self.subscription.includes(event)

View file

@ -0,0 +1,24 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import object
import ftrack_api.event.expression
class Subscription(object):
'''Represent a subscription.'''
parser = ftrack_api.event.expression.Parser()
def __init__(self, subscription):
'''Initialise with *subscription*.'''
self._subscription = subscription
self._expression = self.parser.parse(subscription)
def __str__(self):
'''Return string representation.'''
return self._subscription
def includes(self, event):
'''Return whether subscription includes *event*.'''
return self._expression.match(event)
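
A sketch showing how a subscription filters events, using the Event class from ftrack_api.event.base.

import ftrack_api.event.base

subscription = Subscription('topic=ftrack.update')
event = ftrack_api.event.base.Event(topic='ftrack.update')
assert subscription.includes(event)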

View file

@ -0,0 +1,393 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import str
import sys
import traceback
import ftrack_api.entity.base
class Error(Exception):
'''ftrack specific error.'''
default_message = 'Unspecified error occurred.'
def __init__(self, message=None, details=None):
'''Initialise exception with *message*.
If *message* is None, the class 'default_message' will be used.
*details* should be a mapping of extra information that can be used in
the message and also to provide more context.
'''
if message is None:
message = self.default_message
self.message = message
self.details = details
if self.details is None:
self.details = {}
self.traceback = traceback.format_exc()
def __str__(self):
'''Return string representation.'''
keys = {}
for key, value in self.details.items():
if isinstance(value, bytes):
# Decode byte values so message formatting does not produce
# b'...' artifacts under Python 3.
value = value.decode(sys.getfilesystemencoding())
keys[key] = value
return str(self.message.format(**keys))
class AuthenticationError(Error):
'''Raise when an authentication error occurs.'''
default_message = 'Authentication error.'
class ServerError(Error):
'''Raise when the server reports an error.'''
default_message = 'Server reported error processing request.'
class ServerCompatibilityError(ServerError):
'''Raise when server appears incompatible.'''
default_message = 'Server incompatible.'
class NotFoundError(Error):
'''Raise when something that should exist is not found.'''
default_message = 'Not found.'
class NotUniqueError(Error):
'''Raise when unique value required and duplicate detected.'''
default_message = 'Non-unique value detected.'
class IncorrectResultError(Error):
'''Raise when a result is incorrect.'''
default_message = 'Incorrect result detected.'
class NoResultFoundError(IncorrectResultError):
'''Raise when a result was expected but no result was found.'''
default_message = 'Expected result, but no result was found.'
class MultipleResultsFoundError(IncorrectResultError):
'''Raise when a single result expected, but multiple results found.'''
default_message = 'Expected single result, but received multiple results.'
class EntityTypeError(Error):
'''Raise when an entity type error occurs.'''
default_message = 'Entity type error.'
class UnrecognisedEntityTypeError(EntityTypeError):
'''Raise when an unrecognised entity type detected.'''
default_message = 'Entity type "{entity_type}" not recognised.'
def __init__(self, entity_type, **kw):
'''Initialise with *entity_type* that is unrecognised.'''
kw.setdefault('details', {}).update(dict(
entity_type=entity_type
))
super(UnrecognisedEntityTypeError, self).__init__(**kw)
class OperationError(Error):
'''Raise when an operation error occurs.'''
default_message = 'Operation error.'
class InvalidStateError(Error):
'''Raise when an invalid state detected.'''
default_message = 'Invalid state.'
class InvalidStateTransitionError(InvalidStateError):
'''Raise when an invalid state transition detected.'''
default_message = (
'Invalid transition from {current_state!r} to {target_state!r} state '
'for entity {entity!r}'
)
def __init__(self, current_state, target_state, entity, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
current_state=current_state,
target_state=target_state,
entity=entity
))
super(InvalidStateTransitionError, self).__init__(**kw)
class AttributeError(Error):
'''Raise when an error related to an attribute occurs.'''
default_message = 'Attribute error.'
class ImmutableAttributeError(AttributeError):
'''Raise when modification of immutable attribute attempted.'''
default_message = (
'Cannot modify value of immutable {attribute.name!r} attribute.'
)
def __init__(self, attribute, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
attribute=attribute
))
super(ImmutableAttributeError, self).__init__(**kw)
class CollectionError(Error):
'''Raise when an error related to collections occurs.'''
default_message = 'Collection error.'
def __init__(self, collection, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
collection=collection
))
super(CollectionError, self).__init__(**kw)
class ImmutableCollectionError(CollectionError):
'''Raise when modification of immutable collection attempted.'''
default_message = (
'Cannot modify value of immutable collection {collection!r}.'
)
class DuplicateItemInCollectionError(CollectionError):
'''Raise when duplicate item in collection detected.'''
default_message = (
'Item {item!r} already exists in collection {collection!r}.'
)
def __init__(self, item, collection, **kw):
'''Initialise error.'''
kw.setdefault('details', {}).update(dict(
item=item
))
super(DuplicateItemInCollectionError, self).__init__(collection, **kw)
class ParseError(Error):
'''Raise when a parsing error occurs.'''
default_message = 'Failed to parse.'
class EventHubError(Error):
'''Raise when issues related to event hub occur.'''
default_message = 'Event hub error occurred.'
class EventHubConnectionError(EventHubError):
'''Raise when event hub encounters connection problem.'''
default_message = 'Event hub is not connected.'
class EventHubPacketError(EventHubError):
'''Raise when event hub encounters an issue with a packet.'''
default_message = 'Invalid packet.'
class PermissionDeniedError(Error):
'''Raise when permission is denied.'''
default_message = 'Permission denied.'
class LocationError(Error):
'''Base for errors associated with locations.'''
default_message = 'Unspecified location error'
class ComponentNotInAnyLocationError(LocationError):
'''Raise when component not available in any location.'''
default_message = 'Component not available in any location.'
class ComponentNotInLocationError(LocationError):
'''Raise when component(s) not in location.'''
default_message = (
'Component(s) {formatted_components} not found in location {location}.'
)
def __init__(self, components, location, **kw):
'''Initialise with *components* and *location*.'''
if isinstance(components, ftrack_api.entity.base.Entity):
components = [components]
kw.setdefault('details', {}).update(dict(
components=components,
formatted_components=', '.join(
[str(component) for component in components]
),
location=location
))
super(ComponentNotInLocationError, self).__init__(**kw)
class ComponentInLocationError(LocationError):
'''Raise when component(s) already exists in location.'''
default_message = (
'Component(s) {formatted_components} already exist in location '
'{location}.'
)
def __init__(self, components, location, **kw):
'''Initialise with *components* and *location*.'''
if isinstance(components, ftrack_api.entity.base.Entity):
components = [components]
kw.setdefault('details', {}).update(dict(
components=components,
formatted_components=', '.join(
[str(component) for component in components]
),
location=location
))
super(ComponentInLocationError, self).__init__(**kw)
class AccessorError(Error):
'''Base for errors associated with accessors.'''
default_message = 'Unspecified accessor error'
class AccessorOperationFailedError(AccessorError):
'''Base for failed operations on accessors.'''
default_message = 'Operation {operation} failed: {error}'
def __init__(
self, operation='', resource_identifier=None, error=None, **kw
    ):
        '''Initialise error.'''
        kw.setdefault('details', {}).update(dict(
operation=operation,
resource_identifier=resource_identifier,
error=error
))
super(AccessorOperationFailedError, self).__init__(**kw)
class AccessorUnsupportedOperationError(AccessorOperationFailedError):
'''Raise when operation is unsupported.'''
default_message = 'Operation {operation} unsupported.'
class AccessorPermissionDeniedError(AccessorOperationFailedError):
'''Raise when permission denied.'''
default_message = (
'Cannot {operation} {resource_identifier}. Permission denied.'
)
class AccessorResourceIdentifierError(AccessorError):
    '''Raise when an error related to a resource_identifier occurs.'''
default_message = 'Resource identifier is invalid: {resource_identifier}.'
    def __init__(self, resource_identifier, **kw):
        '''Initialise error.'''
        kw.setdefault('details', {}).update(dict(
resource_identifier=resource_identifier
))
super(AccessorResourceIdentifierError, self).__init__(**kw)
class AccessorFilesystemPathError(AccessorResourceIdentifierError):
    '''Raise when an error related to an accessor filesystem path occurs.'''
default_message = (
'Could not determine filesystem path from resource identifier: '
'{resource_identifier}.'
)
class AccessorResourceError(AccessorError):
'''Base for errors associated with specific resource.'''
default_message = 'Unspecified resource error: {resource_identifier}'
def __init__(self, operation='', resource_identifier=None, error=None,
                 **kw):
        '''Initialise error.'''
        kw.setdefault('details', {}).update(dict(
operation=operation,
resource_identifier=resource_identifier
))
super(AccessorResourceError, self).__init__(**kw)
class AccessorResourceNotFoundError(AccessorResourceError):
'''Raise when a required resource is not found.'''
default_message = 'Resource not found: {resource_identifier}'
class AccessorParentResourceNotFoundError(AccessorResourceError):
'''Raise when a parent resource (such as directory) is not found.'''
default_message = 'Parent resource is missing: {resource_identifier}'
class AccessorResourceInvalidError(AccessorResourceError):
'''Raise when a resource is not the right type.'''
default_message = 'Resource invalid: {resource_identifier}'
class AccessorContainerNotEmptyError(AccessorResourceError):
'''Raise when container is not empty.'''
default_message = 'Container is not empty: {resource_identifier}'
class StructureError(Error):
'''Base for errors associated with structures.'''
default_message = 'Unspecified structure error'
class ConnectionClosedError(Error):
'''Raise when attempt to use closed connection detected.'''
default_message = "Connection closed."

View file

@ -0,0 +1,132 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import str
import termcolor
import ftrack_api.entity.base
import ftrack_api.collection
import ftrack_api.symbol
import ftrack_api.inspection
#: Useful filters to pass to :func:`format`.
FILTER = {
'ignore_unset': (
lambda entity, name, value: value is not ftrack_api.symbol.NOT_SET
)
}
def format(
entity, formatters=None, attribute_filter=None, recursive=False,
indent=0, indent_first_line=True, _seen=None
):
'''Return formatted string representing *entity*.
*formatters* can be used to customise formatting of elements. It should be a
mapping with one or more of the following keys:
* header - Used to format entity type.
* label - Used to format attribute names.
Specify an *attribute_filter* to control which attributes to include. By
default all attributes are included. The *attribute_filter* should be a
callable that accepts `(entity, attribute_name, attribute_value)` and
returns True if the attribute should be included in the output. For example,
to filter out all unset values::
attribute_filter=ftrack_api.formatter.FILTER['ignore_unset']
If *recursive* is True then recurse into Collections and format each entity
present.
*indent* specifies the overall indentation in spaces of the formatted text,
whilst *indent_first_line* determines whether to apply that indent to the
first generated line.
.. warning::
Iterates over all *entity* attributes which may cause multiple queries
to the server. Turn off auto populating in the session to prevent this.
'''
# Initialise default formatters.
if formatters is None:
formatters = dict()
formatters.setdefault(
'header', lambda text: termcolor.colored(
text, 'white', 'on_blue', attrs=['bold']
)
)
formatters.setdefault(
'label', lambda text: termcolor.colored(
text, 'blue', attrs=['bold']
)
)
# Determine indents.
spacer = ' ' * indent
if indent_first_line:
first_line_spacer = spacer
else:
first_line_spacer = ''
# Avoid infinite recursion on circular references.
if _seen is None:
_seen = set()
identifier = str(ftrack_api.inspection.identity(entity))
if identifier in _seen:
return (
first_line_spacer +
formatters['header'](entity.entity_type) + '{...}'
)
_seen.add(identifier)
information = list()
information.append(
first_line_spacer + formatters['header'](entity.entity_type)
)
for key, value in sorted(entity.items()):
if attribute_filter is not None:
if not attribute_filter(entity, key, value):
continue
child_indent = indent + len(key) + 3
if isinstance(value, ftrack_api.entity.base.Entity):
value = format(
value,
formatters=formatters,
attribute_filter=attribute_filter,
recursive=recursive,
indent=child_indent,
indent_first_line=False,
_seen=_seen.copy()
)
if isinstance(value, ftrack_api.collection.Collection):
if recursive:
child_values = []
for index, child in enumerate(value):
child_value = format(
child,
formatters=formatters,
attribute_filter=attribute_filter,
recursive=recursive,
indent=child_indent,
indent_first_line=index != 0,
_seen=_seen.copy()
)
child_values.append(child_value)
value = '\n'.join(child_values)
information.append(
spacer + u' {0}: {1}'.format(formatters['label'](key), value)
)
return '\n'.join(information)
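A minimal usage sketch, assuming a connected session; auto_populate is turned off, as the warning above suggests, so formatting does not trigger one query per attribute:

import ftrack_api
import ftrack_api.formatter

session = ftrack_api.Session(auto_populate=False)
task = session.query('Task').first()
print(ftrack_api.formatter.format(
    task,
    attribute_filter=ftrack_api.formatter.FILTER['ignore_unset']
))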

View file

@ -0,0 +1,138 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import str
from future.utils import native_str
import collections
import ftrack_api.symbol
import ftrack_api.operation
def identity(entity):
'''Return unique identity of *entity*.'''
return (
str(entity.entity_type),
list(primary_key(entity).values())
)
def primary_key(entity):
'''Return primary key of *entity* as an ordered mapping of {field: value}.
To get just the primary key values::
primary_key(entity).values()
'''
primary_key = collections.OrderedDict()
for name in entity.primary_key_attributes:
value = entity[name]
if value is ftrack_api.symbol.NOT_SET:
raise KeyError(
'Missing required value for primary key attribute "{0}" on '
'entity {1!r}.'.format(name, entity)
)
        # todo: Compatibility fix, review for better implementation.
primary_key[native_str(name)] = native_str(value)
return primary_key
def _state(operation, state):
'''Return state following *operation* against current *state*.'''
if (
isinstance(
operation, ftrack_api.operation.CreateEntityOperation
)
and state is ftrack_api.symbol.NOT_SET
):
state = ftrack_api.symbol.CREATED
elif (
isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
)
and state is ftrack_api.symbol.NOT_SET
):
state = ftrack_api.symbol.MODIFIED
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
state = ftrack_api.symbol.DELETED
return state
def state(entity):
'''Return current *entity* state.
.. seealso:: :func:`ftrack_api.inspection.states`.
'''
value = ftrack_api.symbol.NOT_SET
for operation in entity.session.recorded_operations:
# Determine if operation refers to an entity and whether that entity
# is *entity*.
if (
isinstance(
operation,
(
ftrack_api.operation.CreateEntityOperation,
ftrack_api.operation.UpdateEntityOperation,
ftrack_api.operation.DeleteEntityOperation
)
)
and operation.entity_type == entity.entity_type
and operation.entity_key == primary_key(entity)
):
value = _state(operation, value)
return value
def states(entities):
'''Return current states of *entities*.
An optimised function for determining states of multiple entities in one
go.
.. note::
All *entities* should belong to the same session.
.. seealso:: :func:`ftrack_api.inspection.state`.
'''
if not entities:
return []
session = entities[0].session
entities_by_identity = collections.OrderedDict()
for entity in entities:
key = (entity.entity_type, str(list(primary_key(entity).values())))
entities_by_identity[key] = ftrack_api.symbol.NOT_SET
for operation in session.recorded_operations:
if (
isinstance(
operation,
(
ftrack_api.operation.CreateEntityOperation,
ftrack_api.operation.UpdateEntityOperation,
ftrack_api.operation.DeleteEntityOperation
)
)
):
key = (operation.entity_type, str(list(operation.entity_key.values())))
if key not in entities_by_identity:
continue
value = _state(operation, entities_by_identity[key])
entities_by_identity[key] = value
return list(entities_by_identity.values())
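A minimal sketch of how recorded operations drive the reported state, assuming a connected session; rollback is used so the illustrative change is never committed:

import ftrack_api
import ftrack_api.inspection
import ftrack_api.symbol

session = ftrack_api.Session()
task = session.query('Task').first()
# No operations recorded against the entity yet.
assert ftrack_api.inspection.state(task) is ftrack_api.symbol.NOT_SET
task['name'] = 'Renamed task'
# The update operation recorded by the session is now reflected.
assert ftrack_api.inspection.state(task) is ftrack_api.symbol.MODIFIED
session.rollback()  # discard the pending update operation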

View file

@ -0,0 +1,43 @@
# :coding: utf-8
# :copyright: Copyright (c) 2016 ftrack
from builtins import object
import functools
import warnings
def deprecation_warning(message):
    '''Decorate a function to emit *message* as a pending deprecation warning.'''
    def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
warnings.warn(
message,
PendingDeprecationWarning
)
return function(*args, **kwargs)
return wrapper
return decorator
class LazyLogMessage(object):
'''A log message that can be evaluated lazily for improved performance.
Example::
# Formatting of string will not occur unless debug logging enabled.
logger.debug(LazyLogMessage(
'Hello {0}', 'world'
))
'''
def __init__(self, message, *args, **kwargs):
'''Initialise with *message* format string and arguments.'''
self.message = message
self.args = args
self.kwargs = kwargs
def __str__(self):
'''Return string representation.'''
return self.message.format(*self.args, **self.kwargs)
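A short sketch of the laziness in practice: the logging framework calls str() on the message object only when a record is actually emitted, so the format call below never runs at INFO level. Note the arguments themselves are still evaluated eagerly; only the formatting is deferred.

import logging
from ftrack_api.logging import LazyLogMessage as L

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('example')
# Debug records are filtered out above, so __str__ is never invoked here.
logger.debug(L('Processing {0} with options {1!r}', 'Task', {'depth': 2}))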

View file

@ -0,0 +1,116 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import object
import copy
class Operations(object):
'''Stack of operations.'''
def __init__(self):
'''Initialise stack.'''
self._stack = []
super(Operations, self).__init__()
def clear(self):
'''Clear all operations.'''
del self._stack[:]
def push(self, operation):
'''Push *operation* onto stack.'''
self._stack.append(operation)
def pop(self):
'''Pop and return most recent operation from stack.'''
return self._stack.pop()
def __len__(self):
'''Return count of operations.'''
return len(self._stack)
def __iter__(self):
'''Return iterator over operations.'''
return iter(self._stack)
class Operation(object):
'''Represent an operation.'''
class CreateEntityOperation(Operation):
'''Represent create entity operation.'''
def __init__(self, entity_type, entity_key, entity_data):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api.inspection.primary_key`.
*entity_data* should be a mapping of the initial data to populate the
entity with when creating.
.. note::
Shallow copies will be made of each value in *entity_data*.
'''
super(CreateEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
self.entity_data = {}
for key, value in list(entity_data.items()):
self.entity_data[key] = copy.copy(value)
class UpdateEntityOperation(Operation):
'''Represent update entity operation.'''
def __init__(
self, entity_type, entity_key, attribute_name, old_value, new_value
):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api.inspection.primary_key`.
*attribute_name* should be the string name of the attribute being
modified and *old_value* and *new_value* should reflect the change in
value.
.. note::
Shallow copies will be made of both *old_value* and *new_value*.
'''
super(UpdateEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
self.attribute_name = attribute_name
self.old_value = copy.copy(old_value)
self.new_value = copy.copy(new_value)
class DeleteEntityOperation(Operation):
'''Represent delete entity operation.'''
def __init__(self, entity_type, entity_key):
'''Initialise operation.
*entity_type* should be the type of entity in string form (as returned
from :attr:`ftrack_api.entity.base.Entity.entity_type`).
*entity_key* should be the unique key for the entity and should follow
the form returned from :func:`ftrack_api.inspection.primary_key`.
'''
super(DeleteEntityOperation, self).__init__()
self.entity_type = entity_type
self.entity_key = entity_key
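A standalone sketch of the stack; in normal use the session records these operations automatically as entities change, and the plain dict below stands in for the ordered primary key mapping described above:

import ftrack_api.operation

operations = ftrack_api.operation.Operations()
operations.push(ftrack_api.operation.UpdateEntityOperation(
    'Task', {'id': '123'}, 'name', 'old name', 'new name'
))
assert len(operations) == 1
for operation in operations:
    print(operation.entity_type, operation.attribute_name)
operations.pop()  # remove and return the most recent operation
assert len(operations) == 0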

View file

@ -0,0 +1,123 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import logging
import os
import uuid
import imp
import inspect
import traceback
def discover(paths, positional_arguments=None, keyword_arguments=None):
'''Find and load plugins in search *paths*.
Each discovered module should implement a register function that accepts
*positional_arguments* and *keyword_arguments* as \*args and \*\*kwargs
respectively.
If a register function does not accept variable arguments, then attempt to
only pass accepted arguments to the function by inspecting its signature.
'''
logger = logging.getLogger(__name__ + '.discover')
if positional_arguments is None:
positional_arguments = []
if keyword_arguments is None:
keyword_arguments = {}
for path in paths:
# Ignore empty paths that could resolve to current directory.
path = path.strip()
if not path:
continue
for base, directories, filenames in os.walk(path):
for filename in filenames:
name, extension = os.path.splitext(filename)
if extension != '.py':
continue
module_path = os.path.join(base, filename)
unique_name = uuid.uuid4().hex
try:
module = imp.load_source(unique_name, module_path)
except Exception as error:
logger.warning(
'Failed to load plugin from "{0}": {1}'
.format(module_path, error)
)
                    logger.debug(traceback.format_exc())
continue
try:
module.register
except AttributeError:
logger.warning(
'Failed to load plugin that did not define a '
'"register" function at the module level: {0}'
.format(module_path)
)
else:
# Attempt to only pass arguments that are accepted by the
# register function.
specification = inspect.getargspec(module.register)
selected_positional_arguments = positional_arguments
selected_keyword_arguments = keyword_arguments
if (
not specification.varargs and
len(positional_arguments) > len(specification.args)
):
logger.warning(
'Culling passed arguments to match register '
'function signature.'
)
selected_positional_arguments = positional_arguments[
                            :len(specification.args)
]
selected_keyword_arguments = {}
elif not specification.keywords:
# Remove arguments that have been passed as positionals.
remainder = specification.args[
len(positional_arguments):
]
# Determine remaining available keyword arguments.
defined_keyword_arguments = []
if specification.defaults:
defined_keyword_arguments = specification.args[
-len(specification.defaults):
]
remaining_keyword_arguments = set([
keyword_argument for keyword_argument
in defined_keyword_arguments
if keyword_argument in remainder
])
if not set(keyword_arguments.keys()).issubset(
remaining_keyword_arguments
):
logger.warning(
'Culling passed arguments to match register '
'function signature.'
)
selected_keyword_arguments = {
key: value
for key, value in list(keyword_arguments.items())
if key in remaining_keyword_arguments
}
module.register(
*selected_positional_arguments,
**selected_keyword_arguments
)
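A minimal sketch of discovery with a hypothetical plugin path. A file such as /path/to/plugins/hello.py defining a module level register function, e.g. def register(session, extra=None), would be loaded and called with only the arguments its signature accepts:

import ftrack_api.plugin

ftrack_api.plugin.discover(
    ['/path/to/plugins'],          # hypothetical search path
    positional_arguments=['a-session-object'],
    keyword_arguments={'extra': True}
)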

View file

@ -0,0 +1,202 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import re
try:
    # Python 3 location; fall back for Python 2 compatibility.
    from collections.abc import Sequence
except ImportError:
    from collections import Sequence
import ftrack_api.exception
class QueryResult(Sequence):
'''Results from a query.'''
    OFFSET_EXPRESSION = re.compile(r'(?P<offset>offset (?P<value>\d+))')
    LIMIT_EXPRESSION = re.compile(r'(?P<limit>limit (?P<value>\d+))')
def __init__(self, session, expression, page_size=500):
'''Initialise result set.
*session* should be an instance of :class:`ftrack_api.session.Session`
that will be used for executing the query *expression*.
*page_size* should be an integer specifying the maximum number of
records to fetch in one request allowing the results to be fetched
incrementally in a transparent manner for optimal performance. Any
        offset or limit specified in *expression* is honoured for the final result
set, but intermediate queries may be issued with different offsets and
limits in order to fetch pages. When an embedded limit is smaller than
the given *page_size* it will be used instead and no paging will take
place.
.. warning::
Setting *page_size* to a very large amount may negatively impact
performance of not only the caller, but the server in general.
'''
super(QueryResult, self).__init__()
self._session = session
self._results = []
(
self._expression,
self._offset,
self._limit
) = self._extract_offset_and_limit(expression)
self._page_size = page_size
if self._limit is not None and self._limit < self._page_size:
# Optimise case where embedded limit is less than fetching a
# single page.
self._page_size = self._limit
self._next_offset = self._offset
if self._next_offset is None:
# Initialise with zero offset.
self._next_offset = 0
def _extract_offset_and_limit(self, expression):
'''Process *expression* extracting offset and limit.
Return (expression, offset, limit).
'''
offset = None
match = self.OFFSET_EXPRESSION.search(expression)
if match:
offset = int(match.group('value'))
expression = (
expression[:match.start('offset')] +
expression[match.end('offset'):]
)
limit = None
match = self.LIMIT_EXPRESSION.search(expression)
if match:
limit = int(match.group('value'))
expression = (
expression[:match.start('limit')] +
expression[match.end('limit'):]
)
return expression.strip(), offset, limit
def __getitem__(self, index):
'''Return value at *index*.'''
while self._can_fetch_more() and index >= len(self._results):
self._fetch_more()
return self._results[index]
def __len__(self):
'''Return number of items.'''
while self._can_fetch_more():
self._fetch_more()
return len(self._results)
def _can_fetch_more(self):
'''Return whether more results are available to fetch.'''
return self._next_offset is not None
def _fetch_more(self):
'''Fetch next page of results if available.'''
if not self._can_fetch_more():
return
expression = '{0} offset {1} limit {2}'.format(
self._expression, self._next_offset, self._page_size
)
records, metadata = self._session._query(expression)
self._results.extend(records)
if self._limit is not None and (len(self._results) >= self._limit):
# Original limit reached.
self._next_offset = None
del self._results[self._limit:]
else:
# Retrieve next page offset from returned metadata.
self._next_offset = metadata.get('next', {}).get('offset', None)
def all(self):
'''Fetch and return all data.'''
return list(self)
def one(self):
'''Return exactly one single result from query by applying a limit.
Raise :exc:`ValueError` if an existing limit is already present in the
expression.
Raise :exc:`ValueError` if an existing offset is already present in the
expression as offset is inappropriate when expecting a single item.
Raise :exc:`~ftrack_api.exception.MultipleResultsFoundError` if more
than one result was available or
:exc:`~ftrack_api.exception.NoResultFoundError` if no results were
available.
.. note::
Both errors subclass
:exc:`~ftrack_api.exception.IncorrectResultError` if you want to
catch only one error type.
'''
expression = self._expression
if self._limit is not None:
raise ValueError(
'Expression already contains a limit clause.'
)
if self._offset is not None:
raise ValueError(
'Expression contains an offset clause which does not make '
'sense when selecting a single item.'
)
# Apply custom limit as optimisation. A limit of 2 is used rather than
        # 1 so that the multiple matching entries case can be detected.
expression += ' limit 2'
results, metadata = self._session._query(expression)
if not results:
raise ftrack_api.exception.NoResultFoundError()
if len(results) != 1:
raise ftrack_api.exception.MultipleResultsFoundError()
return results[0]
def first(self):
'''Return first matching result from query by applying a limit.
Raise :exc:`ValueError` if an existing limit is already present in the
expression.
If no matching result available return None.
'''
expression = self._expression
if self._limit is not None:
raise ValueError(
'Expression already contains a limit clause.'
)
# Apply custom offset if present.
if self._offset is not None:
expression += ' offset {0}'.format(self._offset)
# Apply custom limit as optimisation.
expression += ' limit 1'
results, metadata = self._session._query(expression)
if results:
return results[0]
return None
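A minimal usage sketch, assuming a connected session; iteration pages transparently, while first() and one() apply their own limits as described above. The query expressions are illustrative:

import ftrack_api

session = ftrack_api.Session()
# Fetches 100 records per request while iterating.
for project in session.query('Project', page_size=100):
    print(project['full_name'])
# first() returns None when nothing matches; one() raises unless exactly
# one record matches.
task = session.query('Task where status.name is "In Progress"').first()
user = session.query('User where username is "john"').one()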

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,51 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import object
class ResourceIdentifierTransformer(object):
'''Transform resource identifiers.
Provide ability to modify resource identifier before it is stored centrally
(:meth:`encode`), or after it has been retrieved, but before it is used
locally (:meth:`decode`).
For example, you might want to decompose paths into a set of key, value
pairs to store centrally and then compose a path from those values when
reading back.
.. note::
This is separate from any transformations an
:class:`ftrack_api.accessor.base.Accessor` may perform and is targeted
towards common transformations.
'''
def __init__(self, session):
'''Initialise resource identifier transformer.
*session* should be the :class:`ftrack_api.session.Session` instance
to use for communication with the server.
'''
self.session = session
super(ResourceIdentifierTransformer, self).__init__()
def encode(self, resource_identifier, context=None):
'''Return encoded *resource_identifier* for storing centrally.
A mapping of *context* values may be supplied to guide the
transformation.
'''
return resource_identifier
def decode(self, resource_identifier, context=None):
'''Return decoded *resource_identifier* for use locally.
A mapping of *context* values may be supplied to guide the
transformation.
'''
return resource_identifier
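A minimal sketch of a custom transformer; the import path follows this commit's layout, and the SITE_ROOT environment variable is purely illustrative:

import os
from ftrack_api.resource_identifier_transformer.base import (
    ResourceIdentifierTransformer
)

class SiteRootTransformer(ResourceIdentifierTransformer):
    '''Store identifiers relative to a per-site root; illustrative only.'''

    def encode(self, resource_identifier, context=None):
        # Strip the local root before storing centrally.
        return os.path.relpath(resource_identifier, os.environ['SITE_ROOT'])

    def decode(self, resource_identifier, context=None):
        # Re-apply the local root when reading back.
        return os.path.join(os.environ['SITE_ROOT'], resource_identifier)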

File diff suppressed because it is too large

View file

@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

View file

@ -0,0 +1,38 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from builtins import object
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
class Structure(with_metaclass(ABCMeta, object)):
'''Structure plugin interface.
A structure plugin should compute appropriate paths for data.
'''
def __init__(self, prefix=''):
'''Initialise structure.'''
self.prefix = prefix
self.path_separator = '/'
super(Structure, self).__init__()
@abstractmethod
def get_resource_identifier(self, entity, context=None):
'''Return a resource identifier for supplied *entity*.
*context* can be a mapping that supplies additional information.
'''
def _get_sequence_expression(self, sequence):
'''Return a sequence expression for *sequence* component.'''
padding = sequence['padding']
if padding:
expression = '%0{0}d'.format(padding)
else:
expression = '%d'
return expression
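A minimal sketch of implementing the interface; this hypothetical structure simply groups data by entity type and id:

import ftrack_api.structure.base

class TypeStructure(ftrack_api.structure.base.Structure):
    '''Group data as <prefix>/<entity_type>/<id>; illustrative only.'''

    def get_resource_identifier(self, entity, context=None):
        '''Return resource identifier for *entity*.'''
        parts = [self.prefix, entity.entity_type, entity['id']]
        return self.path_separator.join(parts).strip('/')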

View file

@ -0,0 +1,12 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
import ftrack_api.structure.base
class EntityIdStructure(ftrack_api.structure.base.Structure):
'''Entity id pass-through structure.'''
def get_resource_identifier(self, entity, context=None):
        '''Return a resource identifier for supplied *entity*.'''
return entity['id']

View file

@ -0,0 +1,91 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import os
import ftrack_api.symbol
import ftrack_api.structure.base
class IdStructure(ftrack_api.structure.base.Structure):
'''Id based structure supporting Components only.
    A component's unique id will be used to form a path to store the data at.
To avoid millions of entries in one directory each id is chunked into four
prefix directories with the remainder used to name the file::
/prefix/1/2/3/4/56789
If the component has a defined filetype it will be added to the path::
/prefix/1/2/3/4/56789.exr
Components that are children of container components will be placed inside
the id structure of their parent::
/prefix/1/2/3/4/56789/355827648d.exr
/prefix/1/2/3/4/56789/ajf24215b5.exr
However, sequence children will be named using their label as an index and
a common prefix of 'file.'::
/prefix/1/2/3/4/56789/file.0001.exr
/prefix/1/2/3/4/56789/file.0002.exr
'''
def get_resource_identifier(self, entity, context=None):
'''Return a resource identifier for supplied *entity*.
*context* can be a mapping that supplies additional information.
'''
if entity.entity_type in ('FileComponent',):
# When in a container, place the file inside a directory named
# after the container.
container = entity['container']
if container and container is not ftrack_api.symbol.NOT_SET:
path = self.get_resource_identifier(container)
if container.entity_type in ('SequenceComponent',):
# Label doubles as index for now.
name = 'file.{0}{1}'.format(
entity['name'], entity['file_type']
)
parts = [os.path.dirname(path), name]
else:
# Just place uniquely identified file into directory
name = entity['id'] + entity['file_type']
parts = [path, name]
else:
name = entity['id'][4:] + entity['file_type']
parts = ([self.prefix] + list(entity['id'][:4]) + [name])
elif entity.entity_type in ('SequenceComponent',):
name = 'file'
# Add a sequence identifier.
sequence_expression = self._get_sequence_expression(entity)
name += '.{0}'.format(sequence_expression)
if (
entity['file_type'] and
entity['file_type'] is not ftrack_api.symbol.NOT_SET
):
name += entity['file_type']
parts = ([self.prefix] + list(entity['id'][:4])
+ [entity['id'][4:]] + [name])
elif entity.entity_type in ('ContainerComponent',):
# Just an id directory
parts = ([self.prefix] +
list(entity['id'][:4]) + [entity['id'][4:]])
else:
raise NotImplementedError('Cannot generate path for unsupported '
'entity {0}'.format(entity))
return self.path_separator.join(parts).strip('/')
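A sketch of the id chunking on its own, using a plain string where the real code receives a FileComponent entity; the id and file type are made up:

component_id = '56789abcdef0'
file_type = '.exr'
# Mirrors the uncontained FileComponent branch above.
parts = ['prefix'] + list(component_id[:4]) + [component_id[4:] + file_type]
print('/'.join(parts))  # prefix/5/6/7/8/9abcdef0.exr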

View file

@ -0,0 +1,28 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from .base import Structure
class OriginStructure(Structure):
'''Origin structure that passes through existing resource identifier.'''
def get_resource_identifier(self, entity, context=None):
'''Return a resource identifier for supplied *entity*.
*context* should be a mapping that includes at least a
'source_resource_identifier' key that refers to the resource identifier
to pass through.
'''
if context is None:
context = {}
resource_identifier = context.get('source_resource_identifier')
if resource_identifier is None:
raise ValueError(
'Could not generate resource identifier as no source resource '
'identifier found in passed context.'
)
return resource_identifier
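A minimal usage sketch; the entity argument is ignored by this structure, so everything hinges on the context mapping. The import path follows this commit's layout:

from ftrack_api.structure.origin import OriginStructure

structure = OriginStructure()
path = structure.get_resource_identifier(
    None,  # unused by OriginStructure
    context={'source_resource_identifier': '/mnt/demo/plate.exr'}
)
assert path == '/mnt/demo/plate.exr'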

View file

@ -0,0 +1,215 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
from builtins import str
import os
import re
import unicodedata
import ftrack_api.symbol
import ftrack_api.exception
import ftrack_api.structure.base
class StandardStructure(ftrack_api.structure.base.Structure):
'''Project hierarchy based structure that only supports Components.
The resource identifier is generated from the project code, the name
of objects in the project structure, asset name and version number::
my_project/folder_a/folder_b/asset_name/v003
If the component is a `FileComponent` then the name of the component and the
file type are used as filename in the resource_identifier::
my_project/folder_a/folder_b/asset_name/v003/foo.jpg
If the component is a `SequenceComponent` then a sequence expression,
`%04d`, is used. E.g. a component with the name `foo` yields::
my_project/folder_a/folder_b/asset_name/v003/foo.%04d.jpg
For the member components their index in the sequence is used::
my_project/folder_a/folder_b/asset_name/v003/foo.0042.jpg
The name of the component is added to the resource identifier if the
component is a `ContainerComponent`. E.g. a container component with the
name `bar` yields::
my_project/folder_a/folder_b/asset_name/v003/bar
For a member of that container the file name is based on the component name
and file type::
my_project/folder_a/folder_b/asset_name/v003/bar/baz.pdf
'''
def __init__(
self, project_versions_prefix=None, illegal_character_substitute='_'
):
'''Initialise structure.
If *project_versions_prefix* is defined, insert after the project code
for versions published directly under the project::
my_project/<project_versions_prefix>/v001/foo.jpg
Replace illegal characters with *illegal_character_substitute* if
defined.
.. note::
Nested component containers/sequences are not supported.
'''
super(StandardStructure, self).__init__()
self.project_versions_prefix = project_versions_prefix
self.illegal_character_substitute = illegal_character_substitute
def _get_parts(self, entity):
'''Return resource identifier parts from *entity*.'''
session = entity.session
version = entity['version']
if version is ftrack_api.symbol.NOT_SET and entity['version_id']:
version = session.get('AssetVersion', entity['version_id'])
error_message = (
'Component {0!r} must be attached to a committed '
'version and a committed asset with a parent context.'.format(
entity
)
)
if (
version is ftrack_api.symbol.NOT_SET or
version in session.created
):
raise ftrack_api.exception.StructureError(error_message)
link = version['link']
if not link:
raise ftrack_api.exception.StructureError(error_message)
structure_names = [
item['name']
for item in link[1:-1]
]
project_id = link[0]['id']
project = session.get('Project', project_id)
asset = version['asset']
version_number = self._format_version(version['version'])
parts = []
parts.append(project['name'])
if structure_names:
parts.extend(structure_names)
elif self.project_versions_prefix:
# Add *project_versions_prefix* if configured and the version is
# published directly under the project.
parts.append(self.project_versions_prefix)
parts.append(asset['name'])
parts.append(version_number)
return [self.sanitise_for_filesystem(part) for part in parts]
def _format_version(self, number):
'''Return a formatted string representing version *number*.'''
return 'v{0:03d}'.format(number)
def sanitise_for_filesystem(self, value):
'''Return *value* with illegal filesystem characters replaced.
An illegal character is one that is not typically valid for filesystem
        usage, such as a non-ascii character, or one that can be awkward to use in a
filesystem, such as spaces. Replace these characters with
the character specified by *illegal_character_substitute* on
initialisation. If no character was specified as substitute then return
*value* unmodified.
'''
if self.illegal_character_substitute is None:
return value
value = unicodedata.normalize('NFKD', str(value)).encode('ascii', 'ignore')
        value = re.sub(r'[^\w\.-]', self.illegal_character_substitute, value.decode('utf-8'))
return str(value.strip().lower())
def get_resource_identifier(self, entity, context=None):
'''Return a resource identifier for supplied *entity*.
*context* can be a mapping that supplies additional information, but
is unused in this implementation.
        Raise a :py:exc:`ftrack_api.exception.StructureError` if *entity* is not
attached to a committed version and a committed asset with a parent
context.
'''
if entity.entity_type in ('FileComponent',):
container = entity['container']
if container:
# Get resource identifier for container.
container_path = self.get_resource_identifier(container)
if container.entity_type in ('SequenceComponent',):
# Strip the sequence component expression from the parent
                    # container and add back the correct filename, i.e.
# /sequence/component/sequence_component_name.0012.exr.
name = '{0}.{1}{2}'.format(
container['name'], entity['name'], entity['file_type']
)
parts = [
os.path.dirname(container_path),
self.sanitise_for_filesystem(name)
]
else:
# Container is not a sequence component so add it as a
# normal component inside the container.
name = entity['name'] + entity['file_type']
parts = [
container_path, self.sanitise_for_filesystem(name)
]
else:
# File component does not have a container, construct name from
# component name and file type.
parts = self._get_parts(entity)
name = entity['name'] + entity['file_type']
parts.append(self.sanitise_for_filesystem(name))
elif entity.entity_type in ('SequenceComponent',):
# Create sequence expression for the sequence component and add it
# to the parts.
parts = self._get_parts(entity)
sequence_expression = self._get_sequence_expression(entity)
parts.append(
'{0}.{1}{2}'.format(
self.sanitise_for_filesystem(entity['name']),
sequence_expression,
self.sanitise_for_filesystem(entity['file_type'])
)
)
elif entity.entity_type in ('ContainerComponent',):
# Add the name of the container to the resource identifier parts.
parts = self._get_parts(entity)
parts.append(self.sanitise_for_filesystem(entity['name']))
else:
raise NotImplementedError(
'Cannot generate resource identifier for unsupported '
'entity {0!r}'.format(entity)
)
return self.path_separator.join(parts)
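A minimal sketch of the sanitisation step on its own; no session is needed. Spaces and the slash become underscores, the accent is stripped and the result is lowercased:

import ftrack_api.structure.standard

structure = ftrack_api.structure.standard.StandardStructure()
print(structure.sanitise_for_filesystem(u'Shot 010 / tést'))  # shot_010___test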

View file

@ -0,0 +1,78 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import os
from builtins import object
class Symbol(object):
'''A constant symbol.'''
def __init__(self, name, value=True):
'''Initialise symbol with unique *name* and *value*.
*value* is used for nonzero testing.
'''
self.name = name
self.value = value
def __str__(self):
'''Return string representation.'''
return self.name
def __repr__(self):
'''Return representation.'''
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __bool__(self):
'''Return whether symbol represents non-zero value.'''
return bool(self.value)
def __copy__(self):
'''Return shallow copy.
Overridden to always return same instance.
'''
return self
#: Symbol representing that no value has been set or loaded.
NOT_SET = Symbol('NOT_SET', False)
#: Symbol representing created state.
CREATED = Symbol('CREATED')
#: Symbol representing modified state.
MODIFIED = Symbol('MODIFIED')
#: Symbol representing deleted state.
DELETED = Symbol('DELETED')
#: Topic published when component added to a location.
COMPONENT_ADDED_TO_LOCATION_TOPIC = 'ftrack.location.component-added'
#: Topic published when component removed from a location.
COMPONENT_REMOVED_FROM_LOCATION_TOPIC = 'ftrack.location.component-removed'
#: Identifier of builtin origin location.
ORIGIN_LOCATION_ID = 'ce9b348f-8809-11e3-821c-20c9d081909b'
#: Identifier of builtin unmanaged location.
UNMANAGED_LOCATION_ID = 'cb268ecc-8809-11e3-a7e2-20c9d081909b'
#: Identifier of builtin review location.
REVIEW_LOCATION_ID = 'cd41be70-8809-11e3-b98a-20c9d081909b'
#: Identifier of builtin connect location.
CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b'
#: Identifier of builtin server location.
SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b'
#: Chunk size used when working with data, defaulting to 1 MiB.
CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024
#: Symbol representing the job that syncs users with LDAP.
JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP')
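A short sketch of the symbol semantics: NOT_SET tests false yet remains distinct from None, and copying returns the same instance so identity checks stay valid:

import copy
import ftrack_api.symbol

value = ftrack_api.symbol.NOT_SET
assert not value                   # NOT_SET tests false...
assert value is not None           # ...but is distinct from None.
assert copy.copy(value) is value   # copies return the same instance
print(repr(value))                 # Symbol(NOT_SET)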