From cc8eb20f0eb3b2c2ae3fa111ae1c820639bf9f99 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 22 Dec 2020 11:58:22 +0100 Subject: [PATCH] added ftrack_api without _old suffix --- .../python2_vendor/ftrack_api/__init__.py | 32 + .../_centralized_storage_scenario.py | 659 +++++ .../ftrack_api/_python_ntpath.py | 537 ++++ .../python2_vendor/ftrack_api/_version.py | 5 + .../python2_vendor/ftrack_api/_weakref.py | 66 + .../ftrack_api/accessor/__init__.py | 2 + .../ftrack_api/accessor/base.py | 124 + .../ftrack_api/accessor/disk.py | 251 ++ .../ftrack_api/accessor/server.py | 240 ++ .../python2_vendor/ftrack_api/attribute.py | 708 +++++ .../ftrack/python2_vendor/ftrack_api/cache.py | 608 ++++ .../python2_vendor/ftrack_api/collection.py | 515 ++++ .../ftrack/python2_vendor/ftrack_api/data.py | 145 + .../ftrack_api/entity/__init__.py | 2 + .../ftrack_api/entity/asset_version.py | 91 + .../python2_vendor/ftrack_api/entity/base.py | 407 +++ .../ftrack_api/entity/component.py | 75 + .../ftrack_api/entity/factory.py | 443 +++ .../python2_vendor/ftrack_api/entity/job.py | 48 + .../ftrack_api/entity/location.py | 745 +++++ .../python2_vendor/ftrack_api/entity/note.py | 105 + .../ftrack_api/entity/project_schema.py | 94 + .../python2_vendor/ftrack_api/entity/user.py | 124 + .../ftrack_api/event/__init__.py | 2 + .../python2_vendor/ftrack_api/event/base.py | 86 + .../ftrack_api/event/expression.py | 285 ++ .../python2_vendor/ftrack_api/event/hub.py | 1108 ++++++++ .../ftrack_api/event/subscriber.py | 28 + .../ftrack_api/event/subscription.py | 24 + .../python2_vendor/ftrack_api/exception.py | 393 +++ .../python2_vendor/ftrack_api/formatter.py | 132 + .../python2_vendor/ftrack_api/inspection.py | 138 + .../python2_vendor/ftrack_api/logging.py | 43 + .../python2_vendor/ftrack_api/operation.py | 116 + .../python2_vendor/ftrack_api/plugin.py | 123 + .../ftrack/python2_vendor/ftrack_api/query.py | 202 ++ .../__init__.py | 2 + .../resource_identifier_transformer/base.py | 51 + .../python2_vendor/ftrack_api/session.py | 2468 +++++++++++++++++ .../ftrack_api/structure/__init__.py | 2 + .../ftrack_api/structure/base.py | 38 + .../ftrack_api/structure/entity_id.py | 12 + .../python2_vendor/ftrack_api/structure/id.py | 91 + .../ftrack_api/structure/origin.py | 28 + .../ftrack_api/structure/standard.py | 215 ++ .../python2_vendor/ftrack_api/symbol.py | 78 + 46 files changed, 11691 insertions(+) create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/_centralized_storage_scenario.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/_python_ntpath.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/_version.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/_weakref.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/accessor/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/accessor/base.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/accessor/disk.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/accessor/server.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/attribute.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/cache.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/collection.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/data.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/__init__.py create 
mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/asset_version.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/base.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/component.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/factory.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/job.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/location.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/note.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/project_schema.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/entity/user.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/base.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/expression.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/hub.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/subscriber.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/event/subscription.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/exception.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/formatter.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/inspection.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/logging.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/operation.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/plugin.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/query.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/base.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/session.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/base.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/entity_id.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/id.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/origin.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/structure/standard.py create mode 100644 pype/modules/ftrack/python2_vendor/ftrack_api/symbol.py diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/__init__.py new file mode 100644 index 0000000000..d8ee30bd8f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/__init__.py @@ -0,0 +1,32 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from ._version import __version__ +from .session import Session + + +def mixin(instance, mixin_class, name=None): + '''Mixin *mixin_class* to *instance*. + + *name* can be used to specify new class name. If not specified then one will + be generated. + + ''' + if name is None: + name = '{0}{1}'.format( + instance.__class__.__name__, mixin_class.__name__ + ) + + # Check mixin class not already present in mro in order to avoid consistent + # method resolution failure. 
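+    # (Bases of the form (mixin_class, cls) cannot be linearised when
+    # mixin_class is already one of cls's bases, so repeating the call
+    # with the same mixin is treated as a no-op rather than an error.)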
+    if mixin_class in instance.__class__.mro():
+        return
+
+    instance.__class__ = type(
+        name,
+        (
+            mixin_class,
+            instance.__class__
+        ),
+        {}
+    )
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/_centralized_storage_scenario.py b/pype/modules/ftrack/python2_vendor/ftrack_api/_centralized_storage_scenario.py
new file mode 100644
index 0000000000..1770b0c843
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/_centralized_storage_scenario.py
@@ -0,0 +1,659 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2016 ftrack
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from builtins import str
+from builtins import object
+import logging
+import json
+import sys
+import os
+
+import ftrack_api
+import ftrack_api.structure.standard as _standard
+from ftrack_api.logging import LazyLogMessage as L
+
+
+scenario_name = 'ftrack.centralized-storage'
+
+
+class ConfigureCentralizedStorageScenario(object):
+    '''Configure a centralized storage scenario.'''
+
+    def __init__(self):
+        '''Instantiate centralized storage scenario.'''
+        self.logger = logging.getLogger(
+            __name__ + '.' + self.__class__.__name__
+        )
+
+    @property
+    def storage_scenario(self):
+        '''Return storage scenario setting.'''
+        return self.session.query(
+            'select value from Setting '
+            'where name is "storage_scenario" and group is "STORAGE"'
+        ).one()
+
+    @property
+    def existing_centralized_storage_configuration(self):
+        '''Return existing centralized storage configuration.'''
+        storage_scenario = self.storage_scenario
+
+        try:
+            configuration = json.loads(storage_scenario['value'])
+        except (ValueError, TypeError):
+            return None
+
+        if not isinstance(configuration, dict):
+            return None
+
+        if configuration.get('scenario') != scenario_name:
+            return None
+
+        return configuration.get('data', {})
+
+    def _get_confirmation_text(self, configuration):
+        '''Return confirmation text from *configuration*.'''
+        configure_location = configuration.get('configure_location')
+        select_location = configuration.get('select_location')
+        select_mount_point = configuration.get('select_mount_point')
+
+        if configure_location:
+            location_text = str(
+                'A new location will be created:\n\n'
+                '* Label: {location_label}\n'
+                '* Name: {location_name}\n'
+                '* Description: {location_description}\n'
+            ).format(**configure_location)
+        else:
+            location = self.session.get(
+                'Location', select_location['location_id']
+            )
+            location_text = (
+                u'You have chosen to use an existing location: {0}'.format(
+                    location['label']
+                )
+            )
+
+        mount_points_text = str(
+            '* Linux: {linux}\n'
+            '* OS X: {osx}\n'
+            '* Windows: {windows}\n\n'
+        ).format(
+            linux=select_mount_point.get('linux_mount_point') or '*Not set*',
+            osx=select_mount_point.get('osx_mount_point') or '*Not set*',
+            windows=select_mount_point.get('windows_mount_point') or '*Not set*'
+        )
+
+        mount_points_not_set = []
+
+        if not select_mount_point.get('linux_mount_point'):
+            mount_points_not_set.append('Linux')
+
+        if not select_mount_point.get('osx_mount_point'):
+            mount_points_not_set.append('OS X')
+
+        if not select_mount_point.get('windows_mount_point'):
+            mount_points_not_set.append('Windows')
+
+        if mount_points_not_set:
+            mount_points_text += str(
+                'Please be aware that this location will not be working on '
+                '{missing} because the mount points are not set up.'
+            ).format(
+                missing=' and '.join(mount_points_not_set)
+            )
+
+        text = str(
+            '#Confirm storage setup#\n\n'
+            'Almost there! Please take a moment to verify the settings you '
+            'are about to save. You can always come back later and update the '
+            'configuration.\n'
+            '##Location##\n\n'
+            '{location}\n'
+            '##Mount points##\n\n'
+            '{mount_points}'
+        ).format(
+            location=location_text,
+            mount_points=mount_points_text
+        )
+
+        return text
+
+    def configure_scenario(self, event):
+        '''Configure scenario based on *event* and return form items.'''
+        steps = (
+            'select_scenario',
+            'select_location',
+            'configure_location',
+            'select_structure',
+            'select_mount_point',
+            'confirm_summary',
+            'save_configuration'
+        )
+
+        warning_message = ''
+        values = event['data'].get('values', {})
+
+        # Calculate previous step and the next.
+        previous_step = values.get('step', 'select_scenario')
+        next_step = steps[steps.index(previous_step) + 1]
+        state = 'configuring'
+
+        self.logger.info(L(
+            u'Configuring scenario, previous step: {0}, next step: {1}. '
+            u'Values {2!r}.',
+            previous_step, next_step, values
+        ))
+
+        if 'configuration' in values:
+            configuration = values.pop('configuration')
+        else:
+            configuration = {}
+
+        if values:
+            # Update configuration with values from the previous step.
+            configuration[previous_step] = values
+
+        if previous_step == 'select_location':
+            values = configuration['select_location']
+            if values.get('location_id') != 'create_new_location':
+                location_exists = self.session.query(
+                    'Location where id is "{0}"'.format(
+                        values.get('location_id')
+                    )
+                ).first()
+                if not location_exists:
+                    next_step = 'select_location'
+                    warning_message = (
+                        '**The selected location does not exist. Please choose '
+                        'one from the dropdown or create a new one.**'
+                    )
+
+        if next_step == 'select_location':
+            try:
+                location_id = (
+                    self.existing_centralized_storage_configuration['location_id']
+                )
+            except (KeyError, TypeError):
+                location_id = None
+
+            options = [{
+                'label': 'Create new location',
+                'value': 'create_new_location'
+            }]
+            for location in self.session.query(
+                'select name, label, description from Location'
+            ):
+                if location['name'] not in (
+                    'ftrack.origin', 'ftrack.unmanaged', 'ftrack.connect',
+                    'ftrack.server', 'ftrack.review'
+                ):
+                    options.append({
+                        'label': u'{label} ({name})'.format(
+                            label=location['label'], name=location['name']
+                        ),
+                        'description': location['description'],
+                        'value': location['id']
+                    })
+
+            warning = ''
+            if location_id is not None:
+                # If there is already a location configured we must make the
+                # user aware that changing the location may be problematic.
+                warning = (
+                    '\n\n**Be careful if you switch to another location '
+                    'for an existing storage scenario. Components that have '
+                    'already been published to the previous location will be '
+                    'made unavailable for common use.**'
+                )
+                default_value = location_id
+            elif location_id is None and len(options) == 1:
+                # No location configured and no existing locations to use.
+                default_value = 'create_new_location'
+            else:
+                # There are existing locations to choose from but none of them
+                # are currently active in the centralized storage scenario.
+                default_value = None
+
+            items = [{
+                'type': 'label',
+                'value': (
+                    '#Select location#\n'
+                    'Choose an already existing location or create a new one '
+                    'to represent your centralized storage.
{0}'.format( + warning + ) + ) + }, { + 'type': 'enumerator', + 'label': 'Location', + 'name': 'location_id', + 'value': default_value, + 'data': options + }] + + default_location_name = 'studio.central-storage-location' + default_location_label = 'Studio location' + default_location_description = ( + 'The studio central location where all components are ' + 'stored.' + ) + + if previous_step == 'configure_location': + configure_location = configuration.get( + 'configure_location' + ) + + if configure_location: + try: + existing_location = self.session.query( + u'Location where name is "{0}"'.format( + configure_location.get('location_name') + ) + ).first() + except UnicodeEncodeError: + next_step = 'configure_location' + warning_message += ( + '**The location name contains non-ascii characters. ' + 'Please change the name and try again.**' + ) + values = configuration['select_location'] + else: + if existing_location: + next_step = 'configure_location' + warning_message += ( + u'**There is already a location named {0}. ' + u'Please change the name and try again.**'.format( + configure_location.get('location_name') + ) + ) + values = configuration['select_location'] + + if ( + not configure_location.get('location_name') or + not configure_location.get('location_label') or + not configure_location.get('location_description') + ): + next_step = 'configure_location' + warning_message += ( + '**Location name, label and description cannot ' + 'be empty.**' + ) + values = configuration['select_location'] + + if next_step == 'configure_location': + # Populate form with previous configuration. + default_location_label = configure_location['location_label'] + default_location_name = configure_location['location_name'] + default_location_description = ( + configure_location['location_description'] + ) + + if next_step == 'configure_location': + + if values.get('location_id') == 'create_new_location': + # Add options to create a new location. + items = [{ + 'type': 'label', + 'value': ( + '#Create location#\n' + 'Here you will create a new location to be used ' + 'with your new Storage scenario. For your ' + 'convenience we have already filled in some default ' + 'values. If this is the first time you are configuring ' + 'a storage scenario in ftrack we recommend that you ' + 'stick with these settings.' + ) + }, { + 'label': 'Label', + 'name': 'location_label', + 'value': default_location_label, + 'type': 'text' + }, { + 'label': 'Name', + 'name': 'location_name', + 'value': default_location_name, + 'type': 'text' + }, { + 'label': 'Description', + 'name': 'location_description', + 'value': default_location_description, + 'type': 'text' + }] + + else: + # The user selected an existing location. Move on to next + # step. + next_step = 'select_mount_point' + + if next_step == 'select_structure': + # There is only one structure to choose from, go to next step. + next_step = 'select_mount_point' + # items = [ + # { + # 'type': 'label', + # 'value': ( + # '#Select structure#\n' + # 'Select which structure to use with your location. ' + # 'The structure is used to generate the filesystem ' + # 'path for components that are added to this location.' + # ) + # }, + # { + # 'type': 'enumerator', + # 'label': 'Structure', + # 'name': 'structure_id', + # 'value': 'standard', + # 'data': [{ + # 'label': 'Standard', + # 'value': 'standard', + # 'description': ( + # 'The Standard structure uses the names in your ' + # 'project structure to determine the path.' 
+ # ) + # }] + # } + # ] + + if next_step == 'select_mount_point': + try: + mount_points = ( + self.existing_centralized_storage_configuration['accessor']['mount_points'] + ) + except (KeyError, TypeError): + mount_points = dict() + + items = [ + { + 'value': ( + '#Mount points#\n' + 'Set mount points for your centralized storage ' + 'location. For the location to work as expected each ' + 'platform that you intend to use must have the ' + 'corresponding mount point set and the storage must ' + 'be accessible. If not set correctly files will not be ' + 'saved or read.' + ), + 'type': 'label' + }, { + 'type': 'text', + 'label': 'Linux', + 'name': 'linux_mount_point', + 'empty_text': 'E.g. /usr/mnt/MyStorage ...', + 'value': mount_points.get('linux', '') + }, { + 'type': 'text', + 'label': 'OS X', + 'name': 'osx_mount_point', + 'empty_text': 'E.g. /Volumes/MyStorage ...', + 'value': mount_points.get('osx', '') + }, { + 'type': 'text', + 'label': 'Windows', + 'name': 'windows_mount_point', + 'empty_text': 'E.g. \\\\MyStorage ...', + 'value': mount_points.get('windows', '') + } + ] + + if next_step == 'confirm_summary': + items = [{ + 'type': 'label', + 'value': self._get_confirmation_text(configuration) + }] + state = 'confirm' + + if next_step == 'save_configuration': + mount_points = configuration['select_mount_point'] + select_location = configuration['select_location'] + + if select_location['location_id'] == 'create_new_location': + configure_location = configuration['configure_location'] + location = self.session.create( + 'Location', + { + 'name': configure_location['location_name'], + 'label': configure_location['location_label'], + 'description': ( + configure_location['location_description'] + ) + } + ) + + else: + location = self.session.query( + 'Location where id is "{0}"'.format( + select_location['location_id'] + ) + ).one() + + setting_value = json.dumps({ + 'scenario': scenario_name, + 'data': { + 'location_id': location['id'], + 'location_name': location['name'], + 'accessor': { + 'mount_points': { + 'linux': mount_points['linux_mount_point'], + 'osx': mount_points['osx_mount_point'], + 'windows': mount_points['windows_mount_point'] + } + } + } + }) + + self.storage_scenario['value'] = setting_value + self.session.commit() + + # Broadcast an event that storage scenario has been configured. + event = ftrack_api.event.base.Event( + topic='ftrack.storage-scenario.configure-done' + ) + self.session.event_hub.publish(event) + + items = [{ + 'type': 'label', + 'value': ( + '#Done!#\n' + 'Your storage scenario is now configured and ready ' + 'to use. **Note that you may have to restart Connect and ' + 'other applications to start using it.**' + ) + }] + state = 'done' + + if warning_message: + items.insert(0, { + 'type': 'label', + 'value': warning_message + }) + + items.append({ + 'type': 'hidden', + 'value': configuration, + 'name': 'configuration' + }) + items.append({ + 'type': 'hidden', + 'value': next_step, + 'name': 'step' + }) + + return { + 'items': items, + 'state': state + } + + def discover_centralized_scenario(self, event): + '''Return action discover dictionary for *event*.''' + return { + 'id': scenario_name, + 'name': 'Centralized storage scenario', + 'description': ( + '(Recommended) centralized storage scenario where all files ' + 'are kept on a storage that is mounted and available to ' + 'everyone in the studio.' 
+            )
+        }
+
+    def register(self, session):
+        '''Subscribe to events on *session*.'''
+        self.session = session
+
+        #: TODO: Move these to a separate function.
+        session.event_hub.subscribe(
+            str(
+                'topic=ftrack.storage-scenario.discover '
+                'and source.user.username="{0}"'
+            ).format(
+                session.api_user
+            ),
+            self.discover_centralized_scenario
+        )
+        session.event_hub.subscribe(
+            str(
+                'topic=ftrack.storage-scenario.configure '
+                'and data.scenario_id="{0}" '
+                'and source.user.username="{1}"'
+            ).format(
+                scenario_name,
+                session.api_user
+            ),
+            self.configure_scenario
+        )
+
+
+class ActivateCentralizedStorageScenario(object):
+    '''Activate a centralized storage scenario.'''
+
+    def __init__(self):
+        '''Instantiate centralized storage scenario.'''
+        self.logger = logging.getLogger(
+            __name__ + '.' + self.__class__.__name__
+        )
+
+    def activate(self, event):
+        '''Activate scenario in *event*.'''
+        storage_scenario = event['data']['storage_scenario']
+
+        try:
+            location_data = storage_scenario['data']
+            location_name = location_data['location_name']
+            location_id = location_data['location_id']
+            mount_points = location_data['accessor']['mount_points']
+
+        except KeyError:
+            error_message = (
+                'Unable to read storage scenario data.'
+            )
+            self.logger.error(L(error_message))
+            raise ftrack_api.exception.LocationError(
+                'Unable to configure location based on scenario.'
+            )
+
+        else:
+            location = self.session.create(
+                'Location',
+                data=dict(
+                    name=location_name,
+                    id=location_id
+                ),
+                reconstructing=True
+            )
+
+            if 'darwin' in sys.platform:
+                prefix = mount_points['osx']
+            elif 'linux' in sys.platform:
+                prefix = mount_points['linux']
+            elif 'win' in sys.platform:
+                prefix = mount_points['windows']
+            else:
+                raise ftrack_api.exception.LocationError(
+                    (
+                        'Unable to find accessor prefix for platform {0}.'
+                    ).format(sys.platform)
+                )
+
+            location.accessor = ftrack_api.accessor.disk.DiskAccessor(
+                prefix=prefix
+            )
+            location.structure = _standard.StandardStructure()
+            location.priority = 1
+            self.logger.info(L(
+                u'Storage scenario activated. Configured {0!r} from '
+                u'{1!r}',
+                location, storage_scenario
+            ))
+
+    def _verify_startup(self, event):
+        '''Verify the storage scenario configuration.'''
+        storage_scenario = event['data']['storage_scenario']
+        location_data = storage_scenario['data']
+        mount_points = location_data['accessor']['mount_points']
+
+        prefix = None
+        if 'darwin' in sys.platform:
+            prefix = mount_points['osx']
+        elif 'linux' in sys.platform:
+            prefix = mount_points['linux']
+        elif 'win' in sys.platform:
+            prefix = mount_points['windows']
+
+        if not prefix:
+            return (
+                u'The storage scenario has not been configured for your '
+                u'operating system. ftrack may not be able to '
+                u'store and track files correctly.'
+            )
+
+        if not os.path.isdir(prefix):
+            return (
+                str(
+                    'The path {0} does not exist. ftrack may not be able to '
+                    'store and track files correctly. \n\nIf the storage is '
+                    'newly set up you may want to create necessary folder '
+                    'structures. If the storage is a network drive you should '
+                    'make sure that it is mounted correctly.'
+ ).format(prefix) + ) + + def register(self, session): + '''Subscribe to events on *session*.''' + self.session = session + + session.event_hub.subscribe( + ( + 'topic=ftrack.storage-scenario.activate ' + 'and data.storage_scenario.scenario="{0}"'.format( + scenario_name + ) + ), + self.activate + ) + + # Listen to verify startup event from ftrack connect to allow responding + # with a message if something is not working correctly with this + # scenario that the user should be notified about. + self.session.event_hub.subscribe( + ( + 'topic=ftrack.connect.verify-startup ' + 'and data.storage_scenario.scenario="{0}"'.format( + scenario_name + ) + ), + self._verify_startup + ) + +def register(session): + '''Register storage scenario.''' + scenario = ActivateCentralizedStorageScenario() + scenario.register(session) + + +def register_configuration(session): + '''Register storage scenario.''' + scenario = ConfigureCentralizedStorageScenario() + scenario.register(session) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/_python_ntpath.py b/pype/modules/ftrack/python2_vendor/ftrack_api/_python_ntpath.py new file mode 100644 index 0000000000..c5a8fe93fd --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/_python_ntpath.py @@ -0,0 +1,537 @@ +# pragma: no cover +# Module 'ntpath' -- common operations on WinNT/Win95 pathnames +"""Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +""" +from __future__ import unicode_literals + +from builtins import str +from builtins import zip +import os +import sys +import stat +import genericpath +import warnings + +from genericpath import * + +__all__ = ["normcase","isabs","join","splitdrive","split","splitext", + "basename","dirname","commonprefix","getsize","getmtime", + "getatime","getctime", "islink","exists","lexists","isdir","isfile", + "ismount","walk","expanduser","expandvars","normpath","abspath", + "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", + "extsep","devnull","realpath","supports_unicode_filenames","relpath"] + +# strings representing various path-related bits and pieces +curdir = '.' +pardir = '..' +extsep = '.' +sep = '\\' +pathsep = ';' +altsep = '/' +defpath = '.;C:\\bin' +if 'ce' in sys.builtin_module_names: + defpath = '\\Windows' +elif 'os2' in sys.builtin_module_names: + # OS/2 w/ VACPP + altsep = '/' +devnull = 'nul' + +# Normalize the case of a pathname and map slashes to backslashes. +# Other normalizations (such as optimizing '../' away) are not done +# (this is done by normpath). + +def normcase(s): + """Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.""" + return s.replace("/", "\\").lower() + + +# Return whether a path is absolute. +# Trivial in Posix, harder on the Mac or MS-DOS. +# For DOS it is absolute if it starts with a slash or backslash (current +# volume), or if a pathname after the volume letter and colon / UNC resource +# starts with a slash or backslash. + +def isabs(s): + """Test whether a path is absolute""" + s = splitdrive(s)[1] + return s != '' and s[:1] in '/\\' + + +# Join two (or more) paths. + +def join(a, *p): + """Join two or more pathname components, inserting "\\" as needed. 
+ If any component is an absolute path, all previous path components + will be discarded.""" + path = a + for b in p: + b_wins = 0 # set to 1 iff b makes path irrelevant + if path == "": + b_wins = 1 + + elif isabs(b): + # This probably wipes out path so far. However, it's more + # complicated if path begins with a drive letter: + # 1. join('c:', '/a') == 'c:/a' + # 2. join('c:/', '/a') == 'c:/a' + # But + # 3. join('c:/a', '/b') == '/b' + # 4. join('c:', 'd:/') = 'd:/' + # 5. join('c:/', 'd:/') = 'd:/' + if path[1:2] != ":" or b[1:2] == ":": + # Path doesn't start with a drive letter, or cases 4 and 5. + b_wins = 1 + + # Else path has a drive letter, and b doesn't but is absolute. + elif len(path) > 3 or (len(path) == 3 and + path[-1] not in "/\\"): + # case 3 + b_wins = 1 + + if b_wins: + path = b + else: + # Join, and ensure there's a separator. + assert len(path) > 0 + if path[-1] in "/\\": + if b and b[0] in "/\\": + path += b[1:] + else: + path += b + elif path[-1] == ":": + path += b + elif b: + if b[0] in "/\\": + path += b + else: + path += "\\" + b + else: + # path is not empty and does not end with a backslash, + # but b is empty; since, e.g., split('a/') produces + # ('a', ''), it's best if join() adds a backslash in + # this case. + path += '\\' + + return path + + +# Split a path in a drive specification (a drive letter followed by a +# colon) and the path specification. +# It is always true that drivespec + pathspec == p +def splitdrive(p): + """Split a pathname into drive and path specifiers. Returns a 2-tuple +"(drive,path)"; either part may be empty""" + if p[1:2] == ':': + return p[0:2], p[2:] + return '', p + + +# Parse UNC paths +def splitunc(p): + """Split a pathname into UNC mount point and relative path specifiers. + + Return a 2-tuple (unc, rest); either part may be empty. + If unc is not empty, it has the form '//host/mount' (or similar + using backslashes). unc+rest is always the input path. + Paths containing drive letters never have an UNC part. + """ + if p[1:2] == ':': + return '', p # Drive letter present + firstTwo = p[0:2] + if firstTwo == '//' or firstTwo == '\\\\': + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter + # \\machine\mountpoint\directories... + # directory ^^^^^^^^^^^^^^^ + normp = normcase(p) + index = normp.find('\\', 2) + if index == -1: + ##raise RuntimeError, 'illegal UNC path: "' + p + '"' + return ("", p) + index = normp.find('\\', index + 1) + if index == -1: + index = len(p) + return p[:index], p[index:] + return '', p + + +# Split a path in head (everything up to the last '/') and tail (the +# rest). After the trailing '/' is stripped, the invariant +# join(head, tail) == p holds. +# The resulting head won't end in '/' unless it is the root. + +def split(p): + """Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.""" + + d, p = splitdrive(p) + # set i to index beyond p's last slash + i = len(p) + while i and p[i-1] not in '/\\': + i = i - 1 + head, tail = p[:i], p[i:] # now tail has no slashes + # remove trailing slashes from head, unless it's all slashes + head2 = head + while head2 and head2[-1] in '/\\': + head2 = head2[:-1] + head = head2 or head + return d + head, tail + + +# Split a path in root and extension. +# The extension is everything starting at the last dot in the last +# pathname component; the root is everything before that. +# It is always true that root + ext == p. 
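+#
+# A few illustrative cases, doctest style (derived from the comments above;
+# not part of the upstream module). The drive-letter handling of join() gives:
+#
+#     >>> join('c:', '/a')
+#     'c:/a'
+#     >>> join('c:/a', '/b')
+#     '/b'
+#     >>> join('c:', 'd:/')
+#     'd:/'
+#
+# and splitext() only considers the last path component:
+#
+#     >>> splitext('foo.bar.txt')
+#     ('foo.bar', '.txt')
+#     >>> splitext('archive.tar\\program')
+#     ('archive.tar\\program', '')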
+ +def splitext(p): + return genericpath._splitext(p, sep, altsep, extsep) +splitext.__doc__ = genericpath._splitext.__doc__ + + +# Return the tail (basename) part of a path. + +def basename(p): + """Returns the final component of a pathname""" + return split(p)[1] + + +# Return the head (dirname) part of a path. + +def dirname(p): + """Returns the directory component of a pathname""" + return split(p)[0] + +# Is a path a symbolic link? +# This will always return false on systems where posix.lstat doesn't exist. + +def islink(path): + """Test for symbolic link. + On WindowsNT/95 and OS/2 always returns false + """ + return False + +# alias exists to lexists +lexists = exists + +# Is a path a mount point? Either a root (with or without drive letter) +# or an UNC path with at most a / or \ after the mount point. + +def ismount(path): + """Test whether a path is a mount point (defined as root of drive)""" + unc, rest = splitunc(path) + if unc: + return rest in ("", "/", "\\") + p = splitdrive(path)[1] + return len(p) == 1 and p[0] in '/\\' + + +# Directory tree walk. +# For each directory under top (including top itself, but excluding +# '.' and '..'), func(arg, dirname, filenames) is called, where +# dirname is the name of the directory and filenames is the list +# of files (and subdirectories etc.) in the directory. +# The func may modify the filenames list, to implement a filter, +# or to impose a different order of visiting. + +def walk(top, func, arg): + """Directory tree walk with callback function. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), call func(arg, dirname, fnames). + dirname is the name of the directory, and fnames a list of the names of + the files and subdirectories in dirname (excluding '.' and '..'). func + may modify the fnames list in-place (e.g. via del or slice assignment), + and walk will only recurse into the subdirectories whose names remain in + fnames; this can be used to implement a filter, or to impose a specific + order of visiting. No semantics are defined for, or required of, arg, + beyond that arg is always passed to func. It can be used, e.g., to pass + a filename pattern, or a mutable object designed to accumulate + statistics. Passing None for arg is common.""" + warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.", + stacklevel=2) + try: + names = os.listdir(top) + except os.error: + return + func(arg, top, names) + for name in names: + name = join(top, name) + if isdir(name): + walk(name, func, arg) + + +# Expand paths beginning with '~' or '~user'. +# '~' means $HOME; '~user' means that user's home directory. +# If the path doesn't begin with '~', or if the user or $HOME is unknown, +# the path is returned unchanged (leaving error reporting to whatever +# function is called with the expanded path as argument). +# See also module 'glob' for expansion of *, ? and [...] in pathnames. +# (A function should also be defined to do full *sh-style environment +# variable expansion.) + +def expanduser(path): + """Expand ~ and ~user constructs. 
+ + If user or $HOME is unknown, do nothing.""" + if path[:1] != '~': + return path + i, n = 1, len(path) + while i < n and path[i] not in '/\\': + i = i + 1 + + if 'HOME' in os.environ: + userhome = os.environ['HOME'] + elif 'USERPROFILE' in os.environ: + userhome = os.environ['USERPROFILE'] + elif not 'HOMEPATH' in os.environ: + return path + else: + try: + drive = os.environ['HOMEDRIVE'] + except KeyError: + drive = '' + userhome = join(drive, os.environ['HOMEPATH']) + + if i != 1: #~user + userhome = join(dirname(userhome), path[1:i]) + + return userhome + path[i:] + + +# Expand paths containing shell variable substitutions. +# The following rules apply: +# - no expansion within single quotes +# - '$$' is translated into '$' +# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2% +# - ${varname} is accepted. +# - $varname is accepted. +# - %varname% is accepted. +# - varnames can be made out of letters, digits and the characters '_-' +# (though is not verified in the ${varname} and %varname% cases) +# XXX With COMMAND.COM you can use any characters in a variable name, +# XXX except '^|<>='. + +def expandvars(path): + """Expand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged.""" + if '$' not in path and '%' not in path: + return path + import string + varchars = string.ascii_letters + string.digits + '_-' + res = '' + index = 0 + pathlen = len(path) + while index < pathlen: + c = path[index] + if c == '\'': # no expansion within single quotes + path = path[index + 1:] + pathlen = len(path) + try: + index = path.index('\'') + res = res + '\'' + path[:index + 1] + except ValueError: + res = res + path + index = pathlen - 1 + elif c == '%': # variable or '%' + if path[index + 1:index + 2] == '%': + res = res + c + index = index + 1 + else: + path = path[index+1:] + pathlen = len(path) + try: + index = path.index('%') + except ValueError: + res = res + '%' + path + index = pathlen - 1 + else: + var = path[:index] + if var in os.environ: + res = res + os.environ[var] + else: + res = res + '%' + var + '%' + elif c == '$': # variable or '$$' + if path[index + 1:index + 2] == '$': + res = res + c + index = index + 1 + elif path[index + 1:index + 2] == '{': + path = path[index+2:] + pathlen = len(path) + try: + index = path.index('}') + var = path[:index] + if var in os.environ: + res = res + os.environ[var] + else: + res = res + '${' + var + '}' + except ValueError: + res = res + '${' + path + index = pathlen - 1 + else: + var = '' + index = index + 1 + c = path[index:index + 1] + while c != '' and c in varchars: + var = var + c + index = index + 1 + c = path[index:index + 1] + if var in os.environ: + res = res + os.environ[var] + else: + res = res + '$' + var + if c != '': + index = index - 1 + else: + res = res + c + index = index + 1 + return res + + +# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B. +# Previously, this function also truncated pathnames to 8+3 format, +# but as this module is called "ntpath", that's obviously wrong! 
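+#
+# Doctest-style examples for expandvars() above and normpath() below
+# (illustrative only; the first assumes NAME=ftrack is set in the
+# environment):
+#
+#     >>> expandvars('%NAME% $NAME ${NAME}')
+#     'ftrack ftrack ftrack'
+#     >>> expandvars('$$ and %%')
+#     '$ and %'
+#     >>> normpath('A//B/./C/..')
+#     'A\\B'
+#     >>> normpath('\\\\?\\C:/literal')   # literal prefix: returned unchanged
+#     '\\\\?\\C:/literal'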
+ +def normpath(path): + """Normalize path, eliminating double slashes, etc.""" + # Preserve unicode (if path is unicode) + backslash, dot = (u'\\', u'.') if isinstance(path, str) else ('\\', '.') + if path.startswith(('\\\\.\\', '\\\\?\\')): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path unchanged + return path + path = path.replace("/", "\\") + prefix, path = splitdrive(path) + # We need to be careful here. If the prefix is empty, and the path starts + # with a backslash, it could either be an absolute path on the current + # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It + # is therefore imperative NOT to collapse multiple backslashes blindly in + # that case. + # The code below preserves multiple backslashes when there is no drive + # letter. This means that the invalid filename \\\a\b is preserved + # unchanged, where a\\\b is normalised to a\b. It's not clear that there + # is any better behaviour for such edge cases. + if prefix == '': + # No drive letter - preserve initial backslashes + while path[:1] == "\\": + prefix = prefix + backslash + path = path[1:] + else: + # We have a drive letter - collapse initial backslashes + if path.startswith("\\"): + prefix = prefix + backslash + path = path.lstrip("\\") + comps = path.split("\\") + i = 0 + while i < len(comps): + if comps[i] in ('.', ''): + del comps[i] + elif comps[i] == '..': + if i > 0 and comps[i-1] != '..': + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith("\\"): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' + if not prefix and not comps: + comps.append(dot) + return prefix + backslash.join(comps) + + +# Return an absolute path. +try: + from nt import _getfullpathname + +except ImportError: # not running on Windows - mock up something sensible + def abspath(path): + """Return the absolute version of a path.""" + if not isabs(path): + if isinstance(path, str): + cwd = os.getcwd() + else: + cwd = os.getcwd() + path = join(cwd, path) + return normpath(path) + +else: # use native Windows method on Windows + def abspath(path): + """Return the absolute version of a path.""" + + if path: # Empty path must return current working directory. + try: + path = _getfullpathname(path) + except WindowsError: + pass # Bad path - return unchanged. + elif isinstance(path, str): + path = os.getcwd() + else: + path = os.getcwd() + return normpath(path) + +# realpath is a no-op on systems without islink support +realpath = abspath +# Win9x family and earlier have no Unicode filename support. 
+supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
+                              sys.getwindowsversion()[3] >= 2)
+
+def _abspath_split(path):
+    abs = abspath(normpath(path))
+    prefix, rest = splitunc(abs)
+    is_unc = bool(prefix)
+    if not is_unc:
+        prefix, rest = splitdrive(abs)
+    return is_unc, prefix, [x for x in rest.split(sep) if x]
+
+def relpath(path, start=curdir):
+    """Return a relative version of a path"""
+
+    if not path:
+        raise ValueError("no path specified")
+
+    start_is_unc, start_prefix, start_list = _abspath_split(start)
+    path_is_unc, path_prefix, path_list = _abspath_split(path)
+
+    if path_is_unc ^ start_is_unc:
+        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+                         % (path, start))
+    if path_prefix.lower() != start_prefix.lower():
+        if path_is_unc:
+            raise ValueError("path is on UNC root %s, start on UNC root %s"
+                             % (path_prefix, start_prefix))
+        else:
+            raise ValueError("path is on drive %s, start on drive %s"
+                             % (path_prefix, start_prefix))
+    # Work out how much of the filepath is shared by start and path.
+    i = 0
+    for e1, e2 in zip(start_list, path_list):
+        if e1.lower() != e2.lower():
+            break
+        i += 1
+
+    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+    if not rel_list:
+        return curdir
+    return join(*rel_list)
+
+try:
+    # The genericpath.isdir implementation uses os.stat and checks the mode
+    # attribute to tell whether or not the path is a directory.
+    # This is overkill on Windows - just pass the path to GetFileAttributes
+    # and check the attribute from there.
+    from nt import _isdir as isdir
+except ImportError:
+    # Use genericpath.isdir as imported above.
+    pass
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/_version.py b/pype/modules/ftrack/python2_vendor/ftrack_api/_version.py
new file mode 100644
index 0000000000..a36a3f15bd
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/_version.py
@@ -0,0 +1,5 @@
+
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+__version__ = '2.1.0'
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/_weakref.py b/pype/modules/ftrack/python2_vendor/ftrack_api/_weakref.py
new file mode 100644
index 0000000000..69cc6f4b4f
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/_weakref.py
@@ -0,0 +1,66 @@
+"""
+Yet another backport of WeakMethod for Python 2.7.
+Changes include removing exception chaining and adding args to super() calls.
+
+Copyright (c) 2001-2019 Python Software Foundation. All rights reserved.
+
+Full license available in LICENSE.python.
+"""
+from weakref import ref
+
+
+class WeakMethod(ref):
+    """
+    A custom `weakref.ref` subclass which simulates a weak reference to
+    a bound method, working around the lifetime problem of bound methods.
+    """
+
+    __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
+
+    def __new__(cls, meth, callback=None):
+        try:
+            obj = meth.__self__
+            func = meth.__func__
+        except AttributeError:
+            raise TypeError(
+                "argument should be a bound method, not {}".format(type(meth))
+            )
+
+        def _cb(arg):
+            # The self-weakref trick is needed to avoid creating a reference
+            # cycle.
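+            # (The callback closes over 'self_wr', a weak reference to the
+            # WeakMethod itself; closing over 'self' directly would keep the
+            # WeakMethod alive through its own callback.)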
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super(WeakMethod, self).__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/__init__.py new file mode 100644 index 0000000000..1aab07ed77 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/__init__.py @@ -0,0 +1,2 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/base.py new file mode 100644 index 0000000000..b76e2a6a9c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/base.py @@ -0,0 +1,124 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2013 ftrack + +from builtins import object +import abc + +import ftrack_api.exception +from future.utils import with_metaclass + + +class Accessor(with_metaclass(abc.ABCMeta, object)): + '''Provide data access to a location. + + A location represents a specific storage, but access to that storage may + vary. For example, both local filesystem and FTP access may be possible for + the same storage. An accessor implements these different ways of accessing + the same data location. + + As different accessors may access the same location, only part of a data + path that is commonly understood may be stored in the database. The format + of this path should be a contract between the accessors that require access + to the same location and is left as an implementation detail. As such, this + system provides no guarantee that two different accessors can provide access + to the same location, though this is a clear goal. The path stored centrally + is referred to as the **resource identifier** and should be used when + calling any of the accessor methods that accept a *resource_identifier* + argument. + + ''' + + def __init__(self): + '''Initialise location accessor.''' + super(Accessor, self).__init__() + + @abc.abstractmethod + def list(self, resource_identifier): + '''Return list of entries in *resource_identifier* container. + + Each entry in the returned list should be a valid resource identifier. + + Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if + *resource_identifier* does not exist or + :exc:`~ftrack_api.exception.AccessorResourceInvalidError` if + *resource_identifier* is not a container. 
+ + ''' + + @abc.abstractmethod + def exists(self, resource_identifier): + '''Return if *resource_identifier* is valid and exists in location.''' + + @abc.abstractmethod + def is_file(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file.''' + + @abc.abstractmethod + def is_container(self, resource_identifier): + '''Return whether *resource_identifier* refers to a container.''' + + @abc.abstractmethod + def is_sequence(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file sequence.''' + + @abc.abstractmethod + def open(self, resource_identifier, mode='rb'): + '''Return :class:`~ftrack_api.data.Data` for *resource_identifier*.''' + + @abc.abstractmethod + def remove(self, resource_identifier): + '''Remove *resource_identifier*. + + Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if + *resource_identifier* does not exist. + + ''' + + @abc.abstractmethod + def make_container(self, resource_identifier, recursive=True): + '''Make a container at *resource_identifier*. + + If *recursive* is True, also make any intermediate containers. + + Should silently ignore existing containers and not recreate them. + + ''' + + @abc.abstractmethod + def get_container(self, resource_identifier): + '''Return resource_identifier of container for *resource_identifier*. + + Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError` + if container of *resource_identifier* could not be determined. + + ''' + + def remove_container(self, resource_identifier): # pragma: no cover + '''Remove container at *resource_identifier*.''' + return self.remove(resource_identifier) + + def get_filesystem_path(self, resource_identifier): # pragma: no cover + '''Return filesystem path for *resource_identifier*. + + Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if + filesystem path could not be determined from *resource_identifier* or + :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if + retrieving filesystem paths is not supported by this accessor. + + ''' + raise ftrack_api.exception.AccessorUnsupportedOperationError( + 'get_filesystem_path', resource_identifier=resource_identifier + ) + + def get_url(self, resource_identifier): + '''Return URL for *resource_identifier*. + + Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if + URL could not be determined from *resource_identifier* or + :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if + retrieving URL is not supported by this accessor. 
+ + ''' + raise ftrack_api.exception.AccessorUnsupportedOperationError( + 'get_url', resource_identifier=resource_identifier + ) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/disk.py b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/disk.py new file mode 100644 index 0000000000..20a2d4b535 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/disk.py @@ -0,0 +1,251 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2013 ftrack + +import os +import sys +import errno +import contextlib + +import ftrack_api._python_ntpath as ntpath +import ftrack_api.accessor.base +import ftrack_api.data +from ftrack_api.exception import ( + AccessorFilesystemPathError, + AccessorUnsupportedOperationError, + AccessorResourceNotFoundError, + AccessorOperationFailedError, + AccessorPermissionDeniedError, + AccessorResourceInvalidError, + AccessorContainerNotEmptyError, + AccessorParentResourceNotFoundError +) + + +class DiskAccessor(ftrack_api.accessor.base.Accessor): + '''Provide disk access to a location. + + Expect resource identifiers to refer to relative filesystem paths. + + ''' + + def __init__(self, prefix, **kw): + '''Initialise location accessor. + + *prefix* specifies the base folder for the disk based structure and + will be prepended to any path. It should be specified in the syntax of + the current OS. + + ''' + if prefix: + prefix = os.path.expanduser(os.path.expandvars(prefix)) + prefix = os.path.abspath(prefix) + self.prefix = prefix + + super(DiskAccessor, self).__init__(**kw) + + def list(self, resource_identifier): + '''Return list of entries in *resource_identifier* container. + + Each entry in the returned list should be a valid resource identifier. + + Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if + *resource_identifier* does not exist or + :exc:`~ftrack_api.exception.AccessorResourceInvalidError` if + *resource_identifier* is not a container. + + ''' + filesystem_path = self.get_filesystem_path(resource_identifier) + + with error_handler( + operation='list', resource_identifier=resource_identifier + ): + listing = [] + for entry in os.listdir(filesystem_path): + listing.append(os.path.join(resource_identifier, entry)) + + return listing + + def exists(self, resource_identifier): + '''Return if *resource_identifier* is valid and exists in location.''' + filesystem_path = self.get_filesystem_path(resource_identifier) + return os.path.exists(filesystem_path) + + def is_file(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file.''' + filesystem_path = self.get_filesystem_path(resource_identifier) + return os.path.isfile(filesystem_path) + + def is_container(self, resource_identifier): + '''Return whether *resource_identifier* refers to a container.''' + filesystem_path = self.get_filesystem_path(resource_identifier) + return os.path.isdir(filesystem_path) + + def is_sequence(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file sequence.''' + raise AccessorUnsupportedOperationError(operation='is_sequence') + + def open(self, resource_identifier, mode='rb'): + '''Return :class:`~ftrack_api.Data` for *resource_identifier*.''' + filesystem_path = self.get_filesystem_path(resource_identifier) + + with error_handler( + operation='open', resource_identifier=resource_identifier + ): + data = ftrack_api.data.File(filesystem_path, mode) + + return data + + def remove(self, resource_identifier): + '''Remove *resource_identifier*. 
+
+        Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
+        *resource_identifier* does not exist.
+
+        '''
+        filesystem_path = self.get_filesystem_path(resource_identifier)
+
+        if self.is_file(resource_identifier):
+            with error_handler(
+                operation='remove', resource_identifier=resource_identifier
+            ):
+                os.remove(filesystem_path)
+
+        elif self.is_container(resource_identifier):
+            with error_handler(
+                operation='remove', resource_identifier=resource_identifier
+            ):
+                os.rmdir(filesystem_path)
+
+        else:
+            raise AccessorResourceNotFoundError(
+                resource_identifier=resource_identifier
+            )
+
+    def make_container(self, resource_identifier, recursive=True):
+        '''Make a container at *resource_identifier*.
+
+        If *recursive* is True, also make any intermediate containers.
+
+        '''
+        filesystem_path = self.get_filesystem_path(resource_identifier)
+
+        with error_handler(
+            operation='makeContainer', resource_identifier=resource_identifier
+        ):
+            try:
+                if recursive:
+                    os.makedirs(filesystem_path)
+                else:
+                    try:
+                        os.mkdir(filesystem_path)
+                    except OSError as error:
+                        if error.errno == errno.ENOENT:
+                            raise AccessorParentResourceNotFoundError(
+                                resource_identifier=resource_identifier
+                            )
+                        else:
+                            raise
+
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise
+
+    def get_container(self, resource_identifier):
+        '''Return resource_identifier of container for *resource_identifier*.
+
+        Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError` if
+        container of *resource_identifier* could not be determined.
+
+        '''
+        filesystem_path = self.get_filesystem_path(resource_identifier)
+
+        container = os.path.dirname(filesystem_path)
+
+        if self.prefix:
+            if not container.startswith(self.prefix):
+                raise AccessorParentResourceNotFoundError(
+                    resource_identifier=resource_identifier,
+                    message='Could not determine container for '
+                    '{resource_identifier} as container falls outside '
+                    'of configured prefix.'
+                )
+
+            # Convert container filesystem path into resource identifier.
+            container = container[len(self.prefix):]
+            if ntpath.isabs(container):
+                # Ensure that resulting path is relative by stripping any
+                # leftover prefixed slashes from string.
+                # E.g. If prefix was '/tmp' and path was '/tmp/foo/bar' the
+                # result will be 'foo/bar'.
+                container = container.lstrip('\\/')
+
+        return container
+
+    def get_filesystem_path(self, resource_identifier):
+        '''Return filesystem path for *resource_identifier*.
+
+        For example::
+
+            >>> accessor = DiskAccessor('/mountpoint')
+            >>> print accessor.get_filesystem_path('test.txt')
+            /mountpoint/test.txt
+            >>> print accessor.get_filesystem_path('/mountpoint/test.txt')
+            /mountpoint/test.txt
+
+        Raise :exc:`ftrack_api.exception.AccessorFilesystemPathError` if filesystem
+        path could not be determined from *resource_identifier*.
+
+        '''
+        filesystem_path = resource_identifier
+        if filesystem_path:
+            filesystem_path = os.path.normpath(filesystem_path)
+
+        if self.prefix:
+            if not os.path.isabs(filesystem_path):
+                filesystem_path = os.path.normpath(
+                    os.path.join(self.prefix, filesystem_path)
+                )
+
+            if not filesystem_path.startswith(self.prefix):
+                raise AccessorFilesystemPathError(
+                    resource_identifier=resource_identifier,
+                    message='Could not determine access path for '
+                    'resource_identifier outside of configured prefix: '
+                    '{resource_identifier}.'
+ ) + + return filesystem_path + + +@contextlib.contextmanager +def error_handler(**kw): + '''Conform raised OSError/IOError exception to appropriate FTrack error.''' + try: + yield + + except (OSError, IOError) as error: + (exception_type, exception_value, traceback) = sys.exc_info() + kw.setdefault('error', error) + + + error_code = getattr(error, 'errno') + if not error_code: + raise AccessorOperationFailedError(**kw) + + if error_code == errno.ENOENT: + raise AccessorResourceNotFoundError(**kw) + + elif error_code == errno.EPERM: + raise AccessorPermissionDeniedError(**kw) + + elif error_code == errno.ENOTEMPTY: + raise AccessorContainerNotEmptyError(**kw) + + elif error_code in (errno.ENOTDIR, errno.EISDIR, errno.EINVAL): + raise AccessorResourceInvalidError(**kw) + + else: + raise AccessorOperationFailedError(**kw) + + except Exception: + raise diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/server.py b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/server.py new file mode 100644 index 0000000000..0fc5f43f30 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/accessor/server.py @@ -0,0 +1,240 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +import os +import hashlib +import base64 +import json + +import requests + +from .base import Accessor +from ..data import String +import ftrack_api.exception +import ftrack_api.symbol + + +class ServerFile(String): + '''Representation of a server file.''' + + def __init__(self, resource_identifier, session, mode='rb'): + '''Initialise file.''' + self.mode = mode + self.resource_identifier = resource_identifier + self._session = session + self._has_read = False + + super(ServerFile, self).__init__() + + def flush(self): + '''Flush all changes.''' + super(ServerFile, self).flush() + + if self.mode == 'wb': + self._write() + + def read(self, limit=None): + '''Read file.''' + if not self._has_read: + self._read() + self._has_read = True + + return super(ServerFile, self).read(limit) + + def _read(self): + '''Read all remote content from key into wrapped_file.''' + position = self.tell() + self.seek(0) + + response = requests.get( + '{0}/component/get'.format(self._session.server_url), + params={ + 'id': self.resource_identifier, + 'username': self._session.api_user, + 'apiKey': self._session.api_key + }, + stream=True + ) + + try: + response.raise_for_status() + except requests.exceptions.HTTPError as error: + raise ftrack_api.exception.AccessorOperationFailedError( + 'Failed to read data: {0}.'.format(error) + ) + + for block in response.iter_content(ftrack_api.symbol.CHUNK_SIZE): + self.wrapped_file.write(block) + + self.flush() + self.seek(position) + + def _write(self): + '''Write current data to remote key.''' + position = self.tell() + self.seek(0) + + # Retrieve component from cache to construct a filename. + component = self._session.get('FileComponent', self.resource_identifier) + if not component: + raise ftrack_api.exception.AccessorOperationFailedError( + 'Unable to retrieve component with id: {0}.'.format( + self.resource_identifier + ) + ) + + # Construct a name from component name and file_type. 
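+        # (For instance, a component named 'main' with file_type '.mov'
+        # is uploaded under the file name 'main.mov'.)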
+ name = component['name'] + if component['file_type']: + name = u'{0}.{1}'.format( + name, + component['file_type'].lstrip('.') + ) + + try: + metadata = self._session.get_upload_metadata( + component_id=self.resource_identifier, + file_name=name, + file_size=self._get_size(), + checksum=self._compute_checksum() + ) + except Exception as error: + raise ftrack_api.exception.AccessorOperationFailedError( + 'Failed to get put metadata: {0}.'.format(error) + ) + + # Ensure at beginning of file before put. + self.seek(0) + + # Put the file based on the metadata. + response = requests.put( + metadata['url'], + data=self.wrapped_file, + headers=metadata['headers'] + ) + + try: + response.raise_for_status() + except requests.exceptions.HTTPError as error: + raise ftrack_api.exception.AccessorOperationFailedError( + 'Failed to put file to server: {0}.'.format(error) + ) + + self.seek(position) + + def _get_size(self): + '''Return size of file in bytes.''' + position = self.tell() + self.seek(0, os.SEEK_END) + length = self.tell() + self.seek(position) + return length + + def _compute_checksum(self): + '''Return checksum for file.''' + fp = self.wrapped_file + buf_size = ftrack_api.symbol.CHUNK_SIZE + hash_obj = hashlib.md5() + spos = fp.tell() + + s = fp.read(buf_size) + while s: + hash_obj.update(s) + s = fp.read(buf_size) + + base64_digest = base64.encodebytes(hash_obj.digest()).decode('utf-8') + if base64_digest[-1] == '\n': + base64_digest = base64_digest[0:-1] + + fp.seek(spos) + return base64_digest + + +class _ServerAccessor(Accessor): + '''Provide server location access.''' + + def __init__(self, session, **kw): + '''Initialise location accessor.''' + super(_ServerAccessor, self).__init__(**kw) + + self._session = session + + def open(self, resource_identifier, mode='rb'): + '''Return :py:class:`~ftrack_api.Data` for *resource_identifier*.''' + return ServerFile(resource_identifier, session=self._session, mode=mode) + + def remove(self, resourceIdentifier): + '''Remove *resourceIdentifier*.''' + response = requests.get( + '{0}/component/remove'.format(self._session.server_url), + params={ + 'id': resourceIdentifier, + 'username': self._session.api_user, + 'apiKey': self._session.api_key + } + ) + if response.status_code != 200: + raise ftrack_api.exception.AccessorOperationFailedError( + 'Failed to remove file.' 
+ ) + + def get_container(self, resource_identifier): + '''Return resource_identifier of container for *resource_identifier*.''' + return None + + def make_container(self, resource_identifier, recursive=True): + '''Make a container at *resource_identifier*.''' + + def list(self, resource_identifier): + '''Return list of entries in *resource_identifier* container.''' + raise NotImplementedError() + + def exists(self, resource_identifier): + '''Return if *resource_identifier* is valid and exists in location.''' + return False + + def is_file(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file.''' + raise NotImplementedError() + + def is_container(self, resource_identifier): + '''Return whether *resource_identifier* refers to a container.''' + raise NotImplementedError() + + def is_sequence(self, resource_identifier): + '''Return whether *resource_identifier* refers to a file sequence.''' + raise NotImplementedError() + + def get_url(self, resource_identifier): + '''Return url for *resource_identifier*.''' + url_string = ( + u'{url}/component/get?id={id}&username={username}' + u'&apiKey={apiKey}' + ) + return url_string.format( + url=self._session.server_url, + id=resource_identifier, + username=self._session.api_user, + apiKey=self._session.api_key + ) + + def get_thumbnail_url(self, resource_identifier, size=None): + '''Return thumbnail url for *resource_identifier*. + + Optionally, specify *size* to constrain the downscaled image to size + x size pixels. + ''' + url_string = ( + u'{url}/component/thumbnail?id={id}&username={username}' + u'&apiKey={apiKey}' + ) + url = url_string.format( + url=self._session.server_url, + id=resource_identifier, + username=self._session.api_user, + apiKey=self._session.api_key + ) + if size: + url += u'&size={0}'.format(size) + + return url diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/attribute.py b/pype/modules/ftrack/python2_vendor/ftrack_api/attribute.py new file mode 100644 index 0000000000..29a0387161 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/attribute.py @@ -0,0 +1,708 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from __future__ import absolute_import + +from builtins import object +import collections +import copy +import logging +import functools + +import ftrack_api.symbol +import ftrack_api.exception +import ftrack_api.collection +import ftrack_api.inspection +import ftrack_api.operation + +logger = logging.getLogger( + __name__ +) + + +def merge_references(function): + '''Decorator to handle merging of references / collections.''' + + @functools.wraps(function) + def get_value(attribute, entity): + '''Merge the attribute with the local cache.''' + + if attribute.name not in entity._inflated: + # Only merge on first access to avoid + # inflating them multiple times. + + logger.debug( + 'Merging potential new data into attached ' + 'entity for attribute {0}.'.format( + attribute.name + ) + ) + + # Local attributes. + local_value = attribute.get_local_value(entity) + if isinstance( + local_value, + ( + ftrack_api.entity.base.Entity, + ftrack_api.collection.Collection, + ftrack_api.collection.MappedCollectionProxy + ) + ): + logger.debug( + 'Merging local value for attribute {0}.'.format(attribute) + ) + + merged_local_value = entity.session._merge( + local_value, merged=dict() + ) + + if merged_local_value is not local_value: + with entity.session.operation_recording(False): + attribute.set_local_value(entity, merged_local_value) + + # Remote attributes. 
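+            # As with the local value above, merge any entity or collection
+            # held as the remote value so that it references session-cached
+            # instances rather than detached duplicates.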
+ remote_value = attribute.get_remote_value(entity) + if isinstance( + remote_value, + ( + ftrack_api.entity.base.Entity, + ftrack_api.collection.Collection, + ftrack_api.collection.MappedCollectionProxy + ) + ): + logger.debug( + 'Merging remote value for attribute {0}.'.format(attribute) + ) + + merged_remote_value = entity.session._merge( + remote_value, merged=dict() + ) + + if merged_remote_value is not remote_value: + attribute.set_remote_value(entity, merged_remote_value) + + entity._inflated.add( + attribute.name + ) + + return function( + attribute, entity + ) + + return get_value + + +class Attributes(object): + '''Collection of properties accessible by name.''' + + def __init__(self, attributes=None): + super(Attributes, self).__init__() + self._data = dict() + if attributes is not None: + for attribute in attributes: + self.add(attribute) + + def add(self, attribute): + '''Add *attribute*.''' + existing = self._data.get(attribute.name, None) + if existing: + raise ftrack_api.exception.NotUniqueError( + 'Attribute with name {0} already added as {1}' + .format(attribute.name, existing) + ) + + self._data[attribute.name] = attribute + + def remove(self, attribute): + '''Remove attribute.''' + self._data.pop(attribute.name) + + def get(self, name): + '''Return attribute by *name*. + + If no attribute matches *name* then return None. + + ''' + return self._data.get(name, None) + + def keys(self): + '''Return list of attribute names.''' + return list(self._data.keys()) + + def __contains__(self, item): + '''Return whether *item* present.''' + if not isinstance(item, Attribute): + return False + + return item.name in self._data + + def __iter__(self): + '''Return iterator over attributes.''' + return iter(self._data.values()) + + def __len__(self): + '''Return count of attributes.''' + return len(self._data) + + +class Attribute(object): + '''A name and value pair persisted remotely.''' + + def __init__( + self, name, default_value=ftrack_api.symbol.NOT_SET, mutable=True, + computed=False + ): + '''Initialise attribute with *name*. + + *default_value* represents the default value for the attribute. It may + be a callable. It is not used within the attribute when providing + values, but instead exists for other parts of the system to reference. + + If *mutable* is set to False then the local value of the attribute on an + entity can only be set when both the existing local and remote values + are :attr:`ftrack_api.symbol.NOT_SET`. The exception to this is when the + target value is also :attr:`ftrack_api.symbol.NOT_SET`. + + If *computed* is set to True the value is a remote side computed value + and should not be long-term cached. 
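+
+        A minimal construction sketch (illustrative only; the names and
+        values shown are assumptions, not defaults of the API)::
+
+            >>> label = Attribute('label')
+            >>> status = Attribute(
+            ...     'status', default_value=lambda instance: 'active'
+            ... )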
+ + ''' + super(Attribute, self).__init__() + self._name = name + self._mutable = mutable + self._computed = computed + self.default_value = default_value + + self._local_key = 'local' + self._remote_key = 'remote' + + def __repr__(self): + '''Return representation of entity.''' + return '<{0}.{1}({2}) object at {3}>'.format( + self.__module__, + self.__class__.__name__, + self.name, + id(self) + ) + + def get_entity_storage(self, entity): + '''Return attribute storage on *entity* creating if missing.''' + storage_key = '_ftrack_attribute_storage' + storage = getattr(entity, storage_key, None) + if storage is None: + storage = collections.defaultdict( + lambda: + { + self._local_key: ftrack_api.symbol.NOT_SET, + self._remote_key: ftrack_api.symbol.NOT_SET + } + ) + setattr(entity, storage_key, storage) + + return storage + + @property + def name(self): + '''Return name.''' + return self._name + + @property + def mutable(self): + '''Return whether attribute is mutable.''' + return self._mutable + + @property + def computed(self): + '''Return whether attribute is computed.''' + return self._computed + + def get_value(self, entity): + '''Return current value for *entity*. + + If a value was set locally then return it, otherwise return last known + remote value. If no remote value yet retrieved, make a request for it + via the session and block until available. + + ''' + value = self.get_local_value(entity) + if value is not ftrack_api.symbol.NOT_SET: + return value + + value = self.get_remote_value(entity) + if value is not ftrack_api.symbol.NOT_SET: + return value + + if not entity.session.auto_populate: + return value + + self.populate_remote_value(entity) + return self.get_remote_value(entity) + + def get_local_value(self, entity): + '''Return locally set value for *entity*.''' + storage = self.get_entity_storage(entity) + return storage[self.name][self._local_key] + + def get_remote_value(self, entity): + '''Return remote value for *entity*. + + .. note:: + + Only return locally stored remote value, do not fetch from remote. + + ''' + storage = self.get_entity_storage(entity) + return storage[self.name][self._remote_key] + + def set_local_value(self, entity, value): + '''Set local *value* for *entity*.''' + if ( + not self.mutable + and self.is_set(entity) + and value is not ftrack_api.symbol.NOT_SET + ): + raise ftrack_api.exception.ImmutableAttributeError(self) + + old_value = self.get_local_value(entity) + + storage = self.get_entity_storage(entity) + storage[self.name][self._local_key] = value + + # Record operation. + if entity.session.record_operations: + entity.session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + entity.entity_type, + ftrack_api.inspection.primary_key(entity), + self.name, + old_value, + value + ) + ) + + def set_remote_value(self, entity, value): + '''Set remote *value*. + + .. note:: + + Only set locally stored remote value, do not persist to remote. + + ''' + storage = self.get_entity_storage(entity) + storage[self.name][self._remote_key] = value + + def populate_remote_value(self, entity): + '''Populate remote value for *entity*.''' + entity.session.populate([entity], self.name) + + def is_modified(self, entity): + '''Return whether local value set and differs from remote. + + .. note:: + + Will not fetch remote value so may report True even when values + are the same on the remote. 
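+
+        For example (sketch; *entity* is any entity carrying this
+        attribute)::
+
+            >>> attribute.set_local_value(entity, 'new value')
+            >>> attribute.is_modified(entity)
+            True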
+ + ''' + local_value = self.get_local_value(entity) + remote_value = self.get_remote_value(entity) + return ( + local_value is not ftrack_api.symbol.NOT_SET + and local_value != remote_value + ) + + def is_set(self, entity): + '''Return whether a value is set for *entity*.''' + return any([ + self.get_local_value(entity) is not ftrack_api.symbol.NOT_SET, + self.get_remote_value(entity) is not ftrack_api.symbol.NOT_SET + ]) + + +class ScalarAttribute(Attribute): + '''Represent a scalar value.''' + + def __init__(self, name, data_type, **kw): + '''Initialise property.''' + super(ScalarAttribute, self).__init__(name, **kw) + self.data_type = data_type + + +class ReferenceAttribute(Attribute): + '''Reference another entity.''' + + def __init__(self, name, entity_type, **kw): + '''Initialise property.''' + super(ReferenceAttribute, self).__init__(name, **kw) + self.entity_type = entity_type + + def populate_remote_value(self, entity): + '''Populate remote value for *entity*. + + As attribute references another entity, use that entity's configured + default projections to auto populate useful attributes when loading. + + ''' + reference_entity_type = entity.session.types[self.entity_type] + default_projections = reference_entity_type.default_projections + + projections = [] + if default_projections: + for projection in default_projections: + projections.append('{0}.{1}'.format(self.name, projection)) + else: + projections.append(self.name) + + entity.session.populate([entity], ', '.join(projections)) + + def is_modified(self, entity): + '''Return whether a local value has been set and differs from remote. + + .. note:: + + Will not fetch remote value so may report True even when values + are the same on the remote. + + ''' + local_value = self.get_local_value(entity) + remote_value = self.get_remote_value(entity) + + if local_value is ftrack_api.symbol.NOT_SET: + return False + + if remote_value is ftrack_api.symbol.NOT_SET: + return True + + if ( + ftrack_api.inspection.identity(local_value) + != ftrack_api.inspection.identity(remote_value) + ): + return True + + return False + + + @merge_references + def get_value(self, entity): + return super(ReferenceAttribute, self).get_value( + entity + ) + +class AbstractCollectionAttribute(Attribute): + '''Base class for collection attributes.''' + + #: Collection class used by attribute. + collection_class = None + + @merge_references + def get_value(self, entity): + '''Return current value for *entity*. + + If a value was set locally then return it, otherwise return last known + remote value. If no remote value yet retrieved, make a request for it + via the session and block until available. + + .. note:: + + As value is a collection that is mutable, will transfer a remote + value into the local value on access if no local value currently + set. + + ''' + super(AbstractCollectionAttribute, self).get_value(entity) + + # Conditionally, copy remote value into local value so that it can be + # mutated without side effects. 
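+        # A shallow copy is sufficient here as Collection.__copy__ also
+        # duplicates its underlying data store.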
+ local_value = self.get_local_value(entity) + remote_value = self.get_remote_value(entity) + if ( + local_value is ftrack_api.symbol.NOT_SET + and isinstance(remote_value, self.collection_class) + ): + try: + with entity.session.operation_recording(False): + self.set_local_value(entity, copy.copy(remote_value)) + except ftrack_api.exception.ImmutableAttributeError: + pass + + value = self.get_local_value(entity) + + # If the local value is still not set then attempt to set it with a + # suitable placeholder collection so that the caller can interact with + # the collection using its normal interface. This is required for a + # newly created entity for example. It *could* be done as a simple + # default value, but that would incur cost for every collection even + # when they are not modified before commit. + if value is ftrack_api.symbol.NOT_SET: + try: + with entity.session.operation_recording(False): + self.set_local_value( + entity, + # None should be treated as empty collection. + None + ) + except ftrack_api.exception.ImmutableAttributeError: + pass + + return self.get_local_value(entity) + + def set_local_value(self, entity, value): + '''Set local *value* for *entity*.''' + if value is not ftrack_api.symbol.NOT_SET: + value = self._adapt_to_collection(entity, value) + value.mutable = self.mutable + + super(AbstractCollectionAttribute, self).set_local_value(entity, value) + + def set_remote_value(self, entity, value): + '''Set remote *value*. + + .. note:: + + Only set locally stored remote value, do not persist to remote. + + ''' + if value is not ftrack_api.symbol.NOT_SET: + value = self._adapt_to_collection(entity, value) + value.mutable = False + + super(AbstractCollectionAttribute, self).set_remote_value(entity, value) + + def _adapt_to_collection(self, entity, value): + '''Adapt *value* to appropriate collection instance for *entity*. + + .. note:: + + If *value* is None then return a suitable empty collection. + + ''' + raise NotImplementedError() + + +class CollectionAttribute(AbstractCollectionAttribute): + '''Represent a collection of other entities.''' + + #: Collection class used by attribute. + collection_class = ftrack_api.collection.Collection + + def _adapt_to_collection(self, entity, value): + '''Adapt *value* to a Collection instance on *entity*.''' + + if not isinstance(value, ftrack_api.collection.Collection): + + if value is None: + value = ftrack_api.collection.Collection(entity, self) + + elif isinstance(value, list): + value = ftrack_api.collection.Collection( + entity, self, data=value + ) + + else: + raise NotImplementedError( + 'Cannot convert {0!r} to collection.'.format(value) + ) + + else: + if value.attribute is not self: + raise ftrack_api.exception.AttributeError( + 'Collection already bound to a different attribute' + ) + + return value + + +class KeyValueMappedCollectionAttribute(AbstractCollectionAttribute): + '''Represent a mapped key, value collection of entities.''' + + #: Collection class used by attribute. + collection_class = ftrack_api.collection.KeyValueMappedCollectionProxy + + def __init__( + self, name, creator, key_attribute, value_attribute, **kw + ): + '''Initialise attribute with *name*. + + *creator* should be a function that accepts a dictionary of data and + is used by the referenced collection to create new entities in the + collection. + + *key_attribute* should be the name of the attribute on an entity in + the collection that represents the value for 'key' of the dictionary. 
+ + *value_attribute* should be the name of the attribute on an entity in + the collection that represents the value for 'value' of the dictionary. + + ''' + self.creator = creator + self.key_attribute = key_attribute + self.value_attribute = value_attribute + + super(KeyValueMappedCollectionAttribute, self).__init__(name, **kw) + + def _adapt_to_collection(self, entity, value): + '''Adapt *value* to an *entity*.''' + if not isinstance( + value, ftrack_api.collection.KeyValueMappedCollectionProxy + ): + + if value is None: + value = ftrack_api.collection.KeyValueMappedCollectionProxy( + ftrack_api.collection.Collection(entity, self), + self.creator, self.key_attribute, + self.value_attribute + ) + + elif isinstance(value, (list, ftrack_api.collection.Collection)): + + if isinstance(value, list): + value = ftrack_api.collection.Collection( + entity, self, data=value + ) + + value = ftrack_api.collection.KeyValueMappedCollectionProxy( + value, self.creator, self.key_attribute, + self.value_attribute + ) + + elif isinstance(value, collections.Mapping): + # Convert mapping. + # TODO: When backend model improves, revisit this logic. + # First get existing value and delete all references. This is + # needed because otherwise they will not be automatically + # removed server side. + # The following should not cause recursion as the internal + # values should be mapped collections already. + current_value = self.get_value(entity) + if not isinstance( + current_value, + ftrack_api.collection.KeyValueMappedCollectionProxy + ): + raise NotImplementedError( + 'Cannot adapt mapping to collection as current value ' + 'type is not a KeyValueMappedCollectionProxy.' + ) + + # Create the new collection using the existing collection as + # basis. Then update through proxy interface to ensure all + # internal operations called consistently (such as entity + # deletion for key removal). + collection = ftrack_api.collection.Collection( + entity, self, data=current_value.collection[:] + ) + collection_proxy = ( + ftrack_api.collection.KeyValueMappedCollectionProxy( + collection, self.creator, + self.key_attribute, self.value_attribute + ) + ) + + # Remove expired keys from collection. + expired_keys = set(current_value.keys()) - set(value.keys()) + for key in expired_keys: + del collection_proxy[key] + + # Set new values for existing keys / add new keys. + for key, value in list(value.items()): + collection_proxy[key] = value + + value = collection_proxy + + else: + raise NotImplementedError( + 'Cannot convert {0!r} to collection.'.format(value) + ) + else: + if value.attribute is not self: + raise ftrack_api.exception.AttributeError( + 'Collection already bound to a different attribute.' + ) + + return value + + +class CustomAttributeCollectionAttribute(AbstractCollectionAttribute): + '''Represent a mapped custom attribute collection of entities.''' + + #: Collection class used by attribute. + collection_class = ( + ftrack_api.collection.CustomAttributeCollectionProxy + ) + + def _adapt_to_collection(self, entity, value): + '''Adapt *value* to an *entity*.''' + if not isinstance( + value, ftrack_api.collection.CustomAttributeCollectionProxy + ): + + if value is None: + value = ftrack_api.collection.CustomAttributeCollectionProxy( + ftrack_api.collection.Collection(entity, self) + ) + + elif isinstance(value, (list, ftrack_api.collection.Collection)): + + # Why are we creating a new if it is a list? This will cause + # any merge to create a new proxy and collection. 
+ if isinstance(value, list): + value = ftrack_api.collection.Collection( + entity, self, data=value + ) + + value = ftrack_api.collection.CustomAttributeCollectionProxy( + value + ) + + elif isinstance(value, collections.Mapping): + # Convert mapping. + # TODO: When backend model improves, revisit this logic. + # First get existing value and delete all references. This is + # needed because otherwise they will not be automatically + # removed server side. + # The following should not cause recursion as the internal + # values should be mapped collections already. + current_value = self.get_value(entity) + if not isinstance( + current_value, + ftrack_api.collection.CustomAttributeCollectionProxy + ): + raise NotImplementedError( + 'Cannot adapt mapping to collection as current value ' + 'type is not a MappedCollectionProxy.' + ) + + # Create the new collection using the existing collection as + # basis. Then update through proxy interface to ensure all + # internal operations called consistently (such as entity + # deletion for key removal). + collection = ftrack_api.collection.Collection( + entity, self, data=current_value.collection[:] + ) + collection_proxy = ( + ftrack_api.collection.CustomAttributeCollectionProxy( + collection + ) + ) + + # Remove expired keys from collection. + expired_keys = set(current_value.keys()) - set(value.keys()) + for key in expired_keys: + del collection_proxy[key] + + # Set new values for existing keys / add new keys. + for key, value in list(value.items()): + collection_proxy[key] = value + + value = collection_proxy + + else: + raise NotImplementedError( + 'Cannot convert {0!r} to collection.'.format(value) + ) + else: + if value.attribute is not self: + raise ftrack_api.exception.AttributeError( + 'Collection already bound to a different attribute.' + ) + + return value diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/cache.py b/pype/modules/ftrack/python2_vendor/ftrack_api/cache.py new file mode 100644 index 0000000000..3ca4c3e7e9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/cache.py @@ -0,0 +1,608 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +'''Caching framework. + +Defines a standardised :class:`Cache` interface for storing data against +specific keys. Key generation is also standardised using a :class:`KeyMaker` +interface. + +Combining a Cache and KeyMaker allows for memoisation of function calls with +respect to the arguments used by using a :class:`Memoiser`. + +As a convenience a simple :func:`memoise` decorator is included for quick +memoisation of function using a global cache and standard key maker. + +''' + +from future import standard_library +standard_library.install_aliases() +from builtins import str +from six import string_types +from builtins import object +import collections +import functools +import abc +import copy +import inspect +import re +import six + +try: + # Python 2.x + import anydbm +except ImportError: + import dbm as anydbm + + +import contextlib +from future.utils import with_metaclass + +try: + try: + import _pickle as pickle + except: + import six + from six.moves import cPickle as pickle +except: + try: + import cPickle as pickle + except: + import pickle + +import ftrack_api.inspection +import ftrack_api.symbol + + +class Cache(with_metaclass(abc.ABCMeta, object)): + '''Cache interface. + + Derive from this to define concrete cache implementations. A cache is + centered around the concept of key:value pairings where the key is unique + across the cache. 
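+
+    For example, with the :class:`MemoryCache` implementation defined later
+    in this module (sketch)::
+
+        >>> cache = MemoryCache()
+        >>> cache.set('key', 'value')
+        >>> cache.get('key')
+        'value'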
+ + ''' + + @abc.abstractmethod + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + + @abc.abstractmethod + def set(self, key, value): + '''Set *value* for *key*.''' + + @abc.abstractmethod + def remove(self, key): + '''Remove *key* and return stored value. + + Raise :exc:`KeyError` if *key* not found. + + ''' + + def keys(self): + '''Return list of keys at this current time. + + .. warning:: + + Actual keys may differ from those returned due to timing of access. + + ''' + raise NotImplementedError() # pragma: no cover + + def values(self): + '''Return values for current keys.''' + values = [] + for key in list(self.keys()): + try: + value = self.get(key) + except KeyError: + continue + else: + values.append(value) + + return values + + def clear(self, pattern=None): + '''Remove all keys matching *pattern*. + + *pattern* should be a regular expression string. + + If *pattern* is None then all keys will be removed. + + ''' + + if pattern is not None: + pattern = re.compile(pattern) + + for key in list(self.keys()): + if pattern is not None: + if not pattern.search(key): + continue + + try: + self.remove(key) + except KeyError: + pass + + +class ProxyCache(Cache): + '''Proxy another cache.''' + + def __init__(self, proxied): + '''Initialise cache with *proxied* cache instance.''' + self.proxied = proxied + super(ProxyCache, self).__init__() + + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + return self.proxied.get(key) + + def set(self, key, value): + '''Set *value* for *key*.''' + return self.proxied.set(key, value) + + def remove(self, key): + '''Remove *key* and return stored value. + + Raise :exc:`KeyError` if *key* not found. + + ''' + return self.proxied.remove(key) + + def keys(self): + '''Return list of keys at this current time. + + .. warning:: + + Actual keys may differ from those returned due to timing of access. + + ''' + return list(self.proxied.keys()) + + +class LayeredCache(Cache): + '''Layered cache.''' + + def __init__(self, caches): + '''Initialise cache with *caches*.''' + super(LayeredCache, self).__init__() + self.caches = caches + + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. + + Attempt to retrieve from cache layers in turn, starting with shallowest. + If value retrieved, then also set the value in each higher level cache + up from where retrieved. + + ''' + target_caches = [] + value = ftrack_api.symbol.NOT_SET + + for cache in self.caches: + try: + value = cache.get(key) + except KeyError: + target_caches.append(cache) + continue + else: + break + + if value is ftrack_api.symbol.NOT_SET: + raise KeyError(key) + + # Set value on all higher level caches. + for cache in target_caches: + cache.set(key, value) + + return value + + def set(self, key, value): + '''Set *value* for *key*.''' + for cache in self.caches: + cache.set(key, value) + + def remove(self, key): + '''Remove *key*. + + Raise :exc:`KeyError` if *key* not found in any layer. + + ''' + removed = False + for cache in self.caches: + try: + cache.remove(key) + except KeyError: + pass + else: + removed = True + + if not removed: + raise KeyError(key) + + def keys(self): + '''Return list of keys at this current time. + + .. warning:: + + Actual keys may differ from those returned due to timing of access. 
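+
+        For example (sketch), keys are the union across all layers::
+
+            >>> first, second = MemoryCache(), MemoryCache()
+            >>> first.set('a', 1)
+            >>> second.set('b', 2)
+            >>> sorted(LayeredCache([first, second]).keys())
+            ['a', 'b']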
+ + ''' + keys = [] + for cache in self.caches: + keys.extend(list(cache.keys())) + + return list(set(keys)) + + +class MemoryCache(Cache): + '''Memory based cache.''' + + def __init__(self): + '''Initialise cache.''' + self._cache = {} + super(MemoryCache, self).__init__() + + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + return self._cache[key] + + def set(self, key, value): + '''Set *value* for *key*.''' + self._cache[key] = value + + def remove(self, key): + '''Remove *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + del self._cache[key] + + def keys(self): + '''Return list of keys at this current time. + + .. warning:: + + Actual keys may differ from those returned due to timing of access. + + ''' + return list(self._cache.keys()) + + +class FileCache(Cache): + '''File based cache that uses :mod:`anydbm` module. + + .. note:: + + No locking of the underlying file is performed. + + ''' + + def __init__(self, path): + '''Initialise cache at *path*.''' + self.path = path + + # Initialise cache. + cache = anydbm.open(self.path, 'c') + cache.close() + + super(FileCache, self).__init__() + + @contextlib.contextmanager + def _database(self): + '''Yield opened database file.''' + cache = anydbm.open(self.path, 'w') + try: + yield cache + finally: + cache.close() + + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + with self._database() as cache: + return cache[key.encode('ascii')].decode('utf-8') + + def set(self, key, value): + '''Set *value* for *key*.''' + with self._database() as cache: + cache[key.encode('ascii')] = value + + def remove(self, key): + '''Remove *key*. + + Raise :exc:`KeyError` if *key* not found. + + ''' + with self._database() as cache: + del cache[key.encode('ascii')] + + def keys(self): + '''Return list of keys at this current time. + + .. warning:: + + Actual keys may differ from those returned due to timing of access. + + ''' + with self._database() as cache: + return [s.decode('utf-8') for s in cache.keys()] + #return list(map(str, cache.keys())) + + +class SerialisedCache(ProxyCache): + '''Proxied cache that stores values as serialised data.''' + + def __init__(self, proxied, encode=None, decode=None): + '''Initialise cache with *encode* and *decode* callables. + + *proxied* is the underlying cache to use for storage. + + ''' + self.encode = encode + self.decode = decode + super(SerialisedCache, self).__init__(proxied) + + def get(self, key): + '''Return value for *key*. + + Raise :exc:`KeyError` if *key* not found. 
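+
+        For example, configured for a JSON round-trip (sketch; assumes
+        ``json`` has been imported)::
+
+            >>> cache = SerialisedCache(
+            ...     MemoryCache(), encode=json.dumps, decode=json.loads
+            ... )
+            >>> cache.set('key', {'a': 1})
+            >>> cache.get('key') == {'a': 1}
+            True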
+ + ''' + value = super(SerialisedCache, self).get(key) + if self.decode: + value = self.decode(value) + + return value + + def set(self, key, value): + '''Set *value* for *key*.''' + if self.encode: + value = self.encode(value) + + super(SerialisedCache, self).set(key, value) + + +class KeyMaker(with_metaclass(abc.ABCMeta, object)): + '''Generate unique keys.''' + + def __init__(self): + '''Initialise key maker.''' + super(KeyMaker, self).__init__() + self.item_separator = '' + + def key(self, *items): + '''Return key for *items*.''' + keys = [] + for item in items: + keys.append(self._key(item)) + + return self.item_separator.join(keys) + + @abc.abstractmethod + def _key(self, obj): + '''Return key for *obj*.''' + + +class StringKeyMaker(KeyMaker): + '''Generate string key.''' + + def _key(self, obj): + '''Return key for *obj*.''' + return str(obj) + + +class ObjectKeyMaker(KeyMaker): + '''Generate unique keys for objects.''' + + def __init__(self): + '''Initialise key maker.''' + super(ObjectKeyMaker, self).__init__() + self.item_separator = b'\0' + self.mapping_identifier = b'\1' + self.mapping_pair_separator = b'\2' + self.iterable_identifier = b'\3' + self.name_identifier = b'\4' + + def _key(self, item): + return self.__key(item) + + def __key(self, item): + '''Return key for *item*. + + Returned key will be a pickle like string representing the *item*. This + allows for typically non-hashable objects to be used in key generation + (such as dictionaries). + + If *item* is iterable then each item in it shall also be passed to this + method to ensure correct key generation. + + Special markers are used to distinguish handling of specific cases in + order to ensure uniqueness of key corresponds directly to *item*. + + Example:: + + >>> key_maker = ObjectKeyMaker() + >>> def add(x, y): + ... "Return sum of *x* and *y*." + ... return x + y + ... + >>> key_maker.key(add, (1, 2)) + '\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03' + >>> key_maker.key(add, (1, 3)) + '\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x03.\x03' + + ''' + + # Ensure p3k uses a protocol available in py2 so can decode it. + pickle_protocol = 2 + + # TODO: Consider using a more robust and comprehensive solution such as + # dill (https://github.com/uqfoundation/dill). 
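+        # Dispatch on the kind of *item*: strings pickle directly, mappings
+        # and other iterables recurse with distinguishing markers, functions,
+        # methods and classes key on their qualified names, and anything else
+        # falls back to a pickle of the object itself.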
+ if isinstance(item, collections.Iterable): + + if isinstance(item, string_types): + return pickle.dumps(item, pickle_protocol) + + if isinstance(item, collections.Mapping): + contents = self.item_separator.join([ + ( + self._key(key) + + self.mapping_pair_separator + + self._key(value) + ) + for key, value in sorted(item.items()) + ]) + + return ( + self.mapping_identifier + + contents + + self.mapping_identifier + ) + else: + contents = self.item_separator.join([ + self._key(item) for item in item + ]) + return ( + self.iterable_identifier + + contents + + self.iterable_identifier + ) + + elif inspect.ismethod(item): + + return b''.join(( + self.name_identifier, + item.__name__.encode(), + self.item_separator, + item.__self__.__class__.__name__.encode(), + self.item_separator, + item.__module__.encode() + )) + + elif inspect.isfunction(item) or inspect.isclass(item): + return b''.join(( + self.name_identifier, + item.__name__.encode(), + self.item_separator, + item.__module__.encode() + )) + + elif inspect.isbuiltin(item): + return self.name_identifier + item.__name__.encode() + + else: + return pickle.dumps(item, pickle_protocol) + + +class Memoiser(object): + '''Memoise function calls using a :class:`KeyMaker` and :class:`Cache`. + + Example:: + + >>> memoiser = Memoiser(MemoryCache(), ObjectKeyMaker()) + >>> def add(x, y): + ... "Return sum of *x* and *y*." + ... print 'Called' + ... return x + y + ... + >>> memoiser.call(add, (1, 2), {}) + Called + >>> memoiser.call(add, (1, 2), {}) + >>> memoiser.call(add, (1, 3), {}) + Called + + ''' + + def __init__(self, cache=None, key_maker=None, return_copies=True): + '''Initialise with *cache* and *key_maker* to use. + + If *cache* is not specified a default :class:`MemoryCache` will be + used. Similarly, if *key_maker* is not specified a default + :class:`ObjectKeyMaker` will be used. + + If *return_copies* is True then all results returned from the cache will + be deep copies to avoid indirect mutation of cached values. + + ''' + self.cache = cache + if self.cache is None: + self.cache = MemoryCache() + + self.key_maker = key_maker + if self.key_maker is None: + self.key_maker = ObjectKeyMaker() + + self.return_copies = return_copies + super(Memoiser, self).__init__() + + def call(self, function, args=None, kw=None): + '''Call *function* with *args* and *kw* and return result. + + If *function* was previously called with exactly the same arguments + then return cached result if available. + + Store result for call in cache. + + ''' + if args is None: + args = () + + if kw is None: + kw = {} + + # Support arguments being passed as positionals or keywords. + arguments = inspect.getcallargs(function, *args, **kw) + + key = self.key_maker.key(function, arguments) + try: + value = self.cache.get(key) + + except KeyError: + value = function(*args, **kw) + self.cache.set(key, value) + + # If requested, deep copy value to return in order to avoid cached value + # being inadvertently altered by the caller. + if self.return_copies: + value = copy.deepcopy(value) + + return value + + +def memoise_decorator(memoiser): + '''Decorator to memoise function calls using *memoiser*.''' + def outer(function): + + @functools.wraps(function) + def inner(*args, **kw): + return memoiser.call(function, args, kw) + + return inner + + return outer + + +#: Default memoiser. +memoiser = Memoiser() + +#: Default memoise decorator using standard cache and key maker. 
+memoise = memoise_decorator(memoiser) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/collection.py b/pype/modules/ftrack/python2_vendor/ftrack_api/collection.py new file mode 100644 index 0000000000..d0536932d9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/collection.py @@ -0,0 +1,515 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from __future__ import absolute_import +from __future__ import unicode_literals + +from builtins import str +import logging + +import collections +import copy + +import ftrack_api.exception +import ftrack_api.inspection +import ftrack_api.symbol +import ftrack_api.operation +import ftrack_api.cache +from ftrack_api.logging import LazyLogMessage as L + + +class Collection(collections.MutableSequence): + '''A collection of entities.''' + + def __init__(self, entity, attribute, mutable=True, data=None): + '''Initialise collection.''' + self.entity = entity + self.attribute = attribute + self._data = [] + self._identities = set() + + # Set initial dataset. + # Note: For initialisation, immutability is deferred till after initial + # population as otherwise there would be no public way to initialise an + # immutable collection. The reason self._data is not just set directly + # is to ensure other logic can be applied without special handling. + self.mutable = True + try: + if data is None: + data = [] + + with self.entity.session.operation_recording(False): + self.extend(data) + finally: + self.mutable = mutable + + def _identity_key(self, entity): + '''Return identity key for *entity*.''' + return str(ftrack_api.inspection.identity(entity)) + + def __copy__(self): + '''Return shallow copy. + + .. note:: + + To maintain expectations on usage, the shallow copy will include a + shallow copy of the underlying data store. + + ''' + cls = self.__class__ + copied_instance = cls.__new__(cls) + copied_instance.__dict__.update(self.__dict__) + copied_instance._data = copy.copy(self._data) + copied_instance._identities = copy.copy(self._identities) + + return copied_instance + + def _notify(self, old_value): + '''Notify about modification.''' + # Record operation. 
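+        # A single update operation captures the whole collection state,
+        # carrying *old_value* alongside the current collection so that
+        # commit can translate the modification into an update payload.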
+ if self.entity.session.record_operations: + self.entity.session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + self.entity.entity_type, + ftrack_api.inspection.primary_key(self.entity), + self.attribute.name, + old_value, + self + ) + ) + + def insert(self, index, item): + '''Insert *item* at *index*.''' + if not self.mutable: + raise ftrack_api.exception.ImmutableCollectionError(self) + + if item in self: + raise ftrack_api.exception.DuplicateItemInCollectionError( + item, self + ) + + old_value = copy.copy(self) + self._data.insert(index, item) + self._identities.add(self._identity_key(item)) + self._notify(old_value) + + def __contains__(self, value): + '''Return whether *value* present in collection.''' + return self._identity_key(value) in self._identities + + def __getitem__(self, index): + '''Return item at *index*.''' + return self._data[index] + + def __setitem__(self, index, item): + '''Set *item* against *index*.''' + if not self.mutable: + raise ftrack_api.exception.ImmutableCollectionError(self) + + try: + existing_index = self.index(item) + except ValueError: + pass + else: + if index != existing_index: + raise ftrack_api.exception.DuplicateItemInCollectionError( + item, self + ) + + old_value = copy.copy(self) + try: + existing_item = self._data[index] + except IndexError: + pass + else: + self._identities.remove(self._identity_key(existing_item)) + + self._data[index] = item + self._identities.add(self._identity_key(item)) + self._notify(old_value) + + def __delitem__(self, index): + '''Remove item at *index*.''' + if not self.mutable: + raise ftrack_api.exception.ImmutableCollectionError(self) + + old_value = copy.copy(self) + item = self._data[index] + del self._data[index] + self._identities.remove(self._identity_key(item)) + self._notify(old_value) + + def __len__(self): + '''Return count of items.''' + return len(self._data) + + def __eq__(self, other): + '''Return whether this collection is equal to *other*.''' + if not isinstance(other, Collection): + return False + + return sorted(self._identities) == sorted(other._identities) + + def __ne__(self, other): + '''Return whether this collection is not equal to *other*.''' + return not self == other + + +class MappedCollectionProxy(collections.MutableMapping): + '''Common base class for mapped collection of entities.''' + + def __init__(self, collection): + '''Initialise proxy for *collection*.''' + self.logger = logging.getLogger( + __name__ + '.' + self.__class__.__name__ + ) + self.collection = collection + super(MappedCollectionProxy, self).__init__() + + def __copy__(self): + '''Return shallow copy. + + .. note:: + + To maintain expectations on usage, the shallow copy will include a + shallow copy of the underlying collection. 
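+
+        For example (sketch; *mapped* is any proxy instance)::
+
+            >>> duplicate = copy.copy(mapped)
+            >>> duplicate.collection is mapped.collection
+            False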
+
+        '''
+        cls = self.__class__
+        copied_instance = cls.__new__(cls)
+        copied_instance.__dict__.update(self.__dict__)
+        copied_instance.collection = copy.copy(self.collection)
+
+        return copied_instance
+
+    @property
+    def mutable(self):
+        '''Return whether collection is mutable.'''
+        return self.collection.mutable
+
+    @mutable.setter
+    def mutable(self, value):
+        '''Set whether collection is mutable to *value*.'''
+        self.collection.mutable = value
+
+    @property
+    def attribute(self):
+        '''Return attribute bound to.'''
+        return self.collection.attribute
+
+    @attribute.setter
+    def attribute(self, value):
+        '''Set bound attribute to *value*.'''
+        self.collection.attribute = value
+
+
+class KeyValueMappedCollectionProxy(MappedCollectionProxy):
+    '''A mapped collection of key, value entities.
+
+    Proxy a standard :class:`Collection` as a mapping where certain attributes
+    from the entities in the collection are mapped to key, value pairs.
+
+    For example::
+
+        >>> collection = [Metadata(key='foo', value='bar'), ...]
+        >>> mapped = KeyValueMappedCollectionProxy(
+        ...     collection, create_metadata,
+        ...     key_attribute='key', value_attribute='value'
+        ... )
+        >>> print mapped['foo']
+        'bar'
+        >>> mapped['bam'] = 'biz'
+        >>> print mapped.collection[-1]
+        Metadata(key='bam', value='biz')
+
+    '''
+
+    def __init__(
+        self, collection, creator, key_attribute, value_attribute
+    ):
+        '''Initialise collection.'''
+        self.creator = creator
+        self.key_attribute = key_attribute
+        self.value_attribute = value_attribute
+        super(KeyValueMappedCollectionProxy, self).__init__(collection)
+
+    def _get_entity_by_key(self, key):
+        '''Return entity instance with matching *key* from collection.'''
+        for entity in self.collection:
+            if entity[self.key_attribute] == key:
+                return entity
+
+        raise KeyError(key)
+
+    def __getitem__(self, key):
+        '''Return value for *key*.'''
+        entity = self._get_entity_by_key(key)
+        return entity[self.value_attribute]
+
+    def __setitem__(self, key, value):
+        '''Set *value* for *key*.'''
+        try:
+            entity = self._get_entity_by_key(key)
+        except KeyError:
+            data = {
+                self.key_attribute: key,
+                self.value_attribute: value
+            }
+            entity = self.creator(self, data)
+
+            if (
+                ftrack_api.inspection.state(entity) is
+                ftrack_api.symbol.CREATED
+            ):
+                # Persisting this entity will be handled here, record the
+                # operation.
+                self.collection.append(entity)
+
+            else:
+                # The entity is created and persisted separately by the
+                # creator. Do not record this operation.
+                with self.collection.entity.session.operation_recording(False):
+                    # Do not record this operation since it will trigger
+                    # redundant and potentially failing operations.
+                    self.collection.append(entity)
+
+        else:
+            entity[self.value_attribute] = value
+
+    def __delitem__(self, key):
+        '''Remove and delete *key*.
+
+        .. note::
+
+            The associated entity will be deleted as well.
+
+        '''
+        for index, entity in enumerate(self.collection):
+            if entity[self.key_attribute] == key:
+                break
+        else:
+            raise KeyError(key)
+
+        del self.collection[index]
+        entity.session.delete(entity)
+
+    def __iter__(self):
+        '''Iterate over all keys.'''
+        keys = set()
+        for entity in self.collection:
+            keys.add(entity[self.key_attribute])
+
+        return iter(keys)
+
+    def __len__(self):
+        '''Return count of keys.'''
+        keys = set()
+        for entity in self.collection:
+            keys.add(entity[self.key_attribute])
+
+        return len(keys)
+
+    def keys(self):
+        # COMPAT for unit tests..
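+        # Under Python 3 the inherited keys() returns a view; materialise
+        # it as a list for callers that expect one.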
+        return list(super(
+            KeyValueMappedCollectionProxy, self
+        ).keys())
+
+
+class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker):
+    '''Generate key for session.'''
+
+    def _key(self, obj):
+        '''Return key for *obj*.'''
+        if isinstance(obj, dict):
+            session = obj.get('session')
+            if session is not None:
+                # Key by session only.
+                return str(id(session))
+
+        return str(obj)
+
+
+#: Memoiser for use with callables that should be called once per session.
+memoise_session = ftrack_api.cache.memoise_decorator(
+    ftrack_api.cache.Memoiser(
+        key_maker=PerSessionDefaultKeyMaker(), return_copies=False
+    )
+)
+
+
+@memoise_session
+def _get_custom_attribute_configurations(session):
+    '''Return list of custom attribute configurations.
+
+    The configuration objects will have key, project_id, id and object_type_id
+    populated.
+
+    '''
+    return session.query(
+        'select key, project_id, id, object_type_id, entity_type from '
+        'CustomAttributeConfiguration'
+    ).all()
+
+
+class CustomAttributeCollectionProxy(MappedCollectionProxy):
+    '''A mapped collection of custom attribute value entities.'''
+
+    def __init__(
+        self, collection
+    ):
+        '''Initialise collection.'''
+        self.key_attribute = 'configuration_id'
+        self.value_attribute = 'value'
+        super(CustomAttributeCollectionProxy, self).__init__(collection)
+
+    def _get_entity_configurations(self):
+        '''Return all configurations for current collection entity.'''
+        entity = self.collection.entity
+        entity_type = None
+        project_id = None
+        object_type_id = None
+
+        if 'object_type_id' in list(entity.keys()):
+            project_id = entity['project_id']
+            entity_type = 'task'
+            object_type_id = entity['object_type_id']
+
+        if entity.entity_type == 'AssetVersion':
+            project_id = entity['asset']['parent']['project_id']
+            entity_type = 'assetversion'
+
+        if entity.entity_type == 'Asset':
+            project_id = entity['parent']['project_id']
+            entity_type = 'asset'
+
+        if entity.entity_type == 'Project':
+            project_id = entity['id']
+            entity_type = 'show'
+
+        if entity.entity_type == 'User':
+            entity_type = 'user'
+
+        if entity_type is None:
+            raise ValueError(
+                'Entity {!r} not supported.'.format(entity)
+            )
+
+        configurations = []
+        for configuration in _get_custom_attribute_configurations(
+            entity.session
+        ):
+            if (
+                configuration['entity_type'] == entity_type and
+                configuration['project_id'] in (project_id, None) and
+                configuration['object_type_id'] == object_type_id
+            ):
+                configurations.append(configuration)
+
+        # Return with global configurations at the end of the list. This is done
+        # so that global configurations are shadowed by project specific if the
+        # configurations list is looped when looking for a matching `key`.
+        return sorted(
+            configurations, key=lambda item: item['project_id'] is None
+        )
+
+    def _get_keys(self):
+        '''Return a list of all keys.'''
+        keys = []
+        for configuration in self._get_entity_configurations():
+            keys.append(configuration['key'])
+
+        return keys
+
+    def _get_entity_by_key(self, key):
+        '''Return entity instance with matching *key* from collection.'''
+        configuration_id = self.get_configuration_id_from_key(key)
+        for entity in self.collection:
+            if entity[self.key_attribute] == configuration_id:
+                return entity
+
+        return None
+
+    def get_configuration_id_from_key(self, key):
+        '''Return id of configuration with matching *key*.
+
+        Raise :exc:`KeyError` if no configuration with matching *key* found.
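+
+        For example (sketch; ``custom_attributes`` is a
+        :class:`CustomAttributeCollectionProxy` and both the key and the
+        returned id are hypothetical, depending on the configurations
+        defined on the server)::
+
+            >>> custom_attributes.get_configuration_id_from_key('fstart')
+            'a1b2c3d4-58ec-11e4-9b0b-3c0754282242'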
+ + ''' + for configuration in self._get_entity_configurations(): + if key == configuration['key']: + return configuration['id'] + + raise KeyError(key) + + def __getitem__(self, key): + '''Return value for *key*.''' + entity = self._get_entity_by_key(key) + + if entity: + return entity[self.value_attribute] + + for configuration in self._get_entity_configurations(): + if configuration['key'] == key: + return configuration['default'] + + raise KeyError(key) + + def __setitem__(self, key, value): + '''Set *value* for *key*.''' + custom_attribute_value = self._get_entity_by_key(key) + + if custom_attribute_value: + custom_attribute_value[self.value_attribute] = value + else: + entity = self.collection.entity + session = entity.session + data = { + self.key_attribute: self.get_configuration_id_from_key(key), + self.value_attribute: value, + 'entity_id': entity['id'] + } + + # Make sure to use the currently active collection. This is + # necessary since a merge might have replaced the current one. + self.collection.entity['custom_attributes'].collection.append( + session.create('CustomAttributeValue', data) + ) + + def __delitem__(self, key): + '''Remove and delete *key*. + + .. note:: + + The associated entity will be deleted as well. + + ''' + custom_attribute_value = self._get_entity_by_key(key) + + if custom_attribute_value: + index = self.collection.index(custom_attribute_value) + del self.collection[index] + + custom_attribute_value.session.delete(custom_attribute_value) + else: + self.logger.warning(L( + 'Cannot delete {0!r} on {1!r}, no custom attribute value set.', + key, self.collection.entity + )) + + def __eq__(self, collection): + '''Return True if *collection* equals proxy collection.''' + if collection is ftrack_api.symbol.NOT_SET: + return False + + return collection.collection == self.collection + + def __iter__(self): + '''Iterate over all keys.''' + keys = self._get_keys() + return iter(keys) + + def __len__(self): + '''Return count of keys.''' + keys = self._get_keys() + return len(keys) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/data.py b/pype/modules/ftrack/python2_vendor/ftrack_api/data.py new file mode 100644 index 0000000000..108b2edf6f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/data.py @@ -0,0 +1,145 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2013 ftrack + +from builtins import object +import os +from abc import ABCMeta, abstractmethod +import tempfile +from future.utils import with_metaclass + + +class Data(with_metaclass(ABCMeta, object)): + '''File-like object for manipulating data.''' + + def __init__(self): + '''Initialise data access.''' + self.closed = False + + @abstractmethod + def read(self, limit=None): + '''Return content from current position up to *limit*.''' + + @abstractmethod + def write(self, content): + '''Write content at current position.''' + + def flush(self): + '''Flush buffers ensuring data written.''' + + def seek(self, offset, whence=os.SEEK_SET): + '''Move internal pointer by *offset*. + + The *whence* argument is optional and defaults to os.SEEK_SET or 0 + (absolute file positioning); other values are os.SEEK_CUR or 1 + (seek relative to the current position) and os.SEEK_END or 2 + (seek relative to the file's end). 
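+
+        For example, to measure the data without consuming it (sketch,
+        mirroring how implementations in this package use seek/tell)::
+
+            >>> position = data.tell()
+            >>> data.seek(0, os.SEEK_END)
+            >>> size = data.tell()
+            >>> data.seek(position)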
+ + ''' + raise NotImplementedError('Seek not supported.') + + def tell(self): + '''Return current position of internal pointer.''' + raise NotImplementedError('Tell not supported.') + + def close(self): + '''Flush buffers and prevent further access.''' + self.flush() + self.closed = True + + +class FileWrapper(Data): + '''Data wrapper for Python file objects.''' + + def __init__(self, wrapped_file): + '''Initialise access to *wrapped_file*.''' + self.wrapped_file = wrapped_file + self._read_since_last_write = False + super(FileWrapper, self).__init__() + + def read(self, limit=None): + '''Return content from current position up to *limit*.''' + self._read_since_last_write = True + + if limit is None: + limit = -1 + + return self.wrapped_file.read(limit) + + def write(self, content): + '''Write content at current position.''' + if self._read_since_last_write: + # Windows requires a seek before switching from read to write. + self.seek(self.tell()) + + self.wrapped_file.write(content) + self._read_since_last_write = False + + def flush(self): + '''Flush buffers ensuring data written.''' + super(FileWrapper, self).flush() + if hasattr(self.wrapped_file, 'flush'): + self.wrapped_file.flush() + + def seek(self, offset, whence=os.SEEK_SET): + '''Move internal pointer by *offset*.''' + self.wrapped_file.seek(offset, whence) + + def tell(self): + '''Return current position of internal pointer.''' + return self.wrapped_file.tell() + + def close(self): + '''Flush buffers and prevent further access.''' + if not self.closed: + super(FileWrapper, self).close() + if hasattr(self.wrapped_file, 'close'): + self.wrapped_file.close() + + +class File(FileWrapper): + '''Data wrapper accepting filepath.''' + + def __init__(self, path, mode='rb'): + '''Open file at *path* with *mode*.''' + file_object = open(path, mode) + super(File, self).__init__(file_object) + + +class String(FileWrapper): + '''Data wrapper using TemporaryFile instance.''' + + def __init__(self, content=None): + '''Initialise data with *content*.''' + + # Track if data is binary or not. If it is binary then read should also + # return binary. 
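+        # Writing or initialising with text (not bytes) flips this flag so
+        # that read() transparently decodes the stored UTF-8 back to text.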
+ self.is_binary = True + + super(String, self).__init__( + tempfile.TemporaryFile() + ) + + if content is not None: + if not isinstance(content, bytes): + self.is_binary = False + content = content.encode() + + self.wrapped_file.write(content) + self.wrapped_file.seek(0) + + def write(self, content): + if not isinstance(content, bytes): + self.is_binary = False + content = content.encode() + + super(String, self).write( + content + ) + + def read(self, limit=None): + content = super(String, self).read(limit) + + if not self.is_binary: + content = content.decode('utf-8') + + return content diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/__init__.py new file mode 100644 index 0000000000..1d452f2828 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/__init__.py @@ -0,0 +1,2 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack \ No newline at end of file diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/asset_version.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/asset_version.py new file mode 100644 index 0000000000..859d94e436 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/asset_version.py @@ -0,0 +1,91 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +import ftrack_api.entity.base + + +class AssetVersion(ftrack_api.entity.base.Entity): + '''Represent asset version.''' + + def create_component( + self, path, data=None, location=None + ): + '''Create a new component from *path* with additional *data* + + .. note:: + + This is a helper method. To create components manually use the + standard :meth:`Session.create` method. + + *path* can be a string representing a filesystem path to the data to + use for the component. The *path* can also be specified as a sequence + string, in which case a sequence component with child components for + each item in the sequence will be created automatically. The accepted + format for a sequence is '{head}{padding}{tail} [{ranges}]'. For + example:: + + '/path/to/file.%04d.ext [1-5, 7, 8, 10-20]' + + .. seealso:: + + `Clique documentation `_ + + *data* should be a dictionary of any additional data to construct the + component with (as passed to :meth:`Session.create`). This version is + automatically set as the component's version. + + If *location* is specified then automatically add component to that + location. + + ''' + if data is None: + data = {} + + data.pop('version_id', None) + data['version'] = self + + return self.session.create_component(path, data=data, location=location) + + def encode_media(self, media, keep_original='auto'): + '''Return a new Job that encode *media* to make it playable in browsers. + + *media* can be a path to a file or a FileComponent in the ftrack.server + location. + + The job will encode *media* based on the file type and job data contains + information about encoding in the following format:: + + { + 'output': [{ + 'format': 'video/mp4', + 'component_id': 'e2dc0524-b576-11d3-9612-080027331d74' + }, { + 'format': 'image/jpeg', + 'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b' + }], + 'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294', + 'keep_original': True + } + + The output components are associated with the job via the job_components + relation. + + An image component will always be generated if possible, and will be + set as the version's thumbnail. 
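+
+        For example (sketch; assumes ``json`` has been imported, ``version``
+        is a committed AssetVersion and the job's ``data`` attribute holds
+        the JSON document in the format shown above)::
+
+            >>> job = version.encode_media('/path/to/media.mov')
+            >>> job_data = json.loads(job['data'])
+            >>> job_data['output'][0]['format']
+            'video/mp4'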
+
+        The new components will automatically be associated with the version.
+        A server version of 3.3.32 or higher is required for this to function
+        properly.
+
+        If *media* is a file path, a new source component will be created and
+        added to the ftrack server location and a call to :meth:`commit` will be
+        issued. If *media* is a FileComponent, it will be assumed to be
+        available in the ftrack.server location.
+
+        If *keep_original* is not set, the original media will be kept if it
+        is a FileComponent, and deleted if it is a file path. You can specify
+        True or False to change this behavior.
+        '''
+        return self.session.encode_media(
+            media, version_id=self['id'], keep_original=keep_original
+        )
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/base.py
new file mode 100644
index 0000000000..4932372214
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/base.py
@@ -0,0 +1,407 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+from __future__ import absolute_import
+
+from builtins import str
+import abc
+import collections
+import logging
+
+import ftrack_api.symbol
+import ftrack_api.attribute
+import ftrack_api.inspection
+import ftrack_api.exception
+import ftrack_api.operation
+from ftrack_api.logging import LazyLogMessage as L
+from future.utils import with_metaclass
+
+
+class _EntityBase(object):
+    '''Base class to allow for mixins; we need a common base.'''
+    pass
+
+
+class DynamicEntityTypeMetaclass(abc.ABCMeta):
+    '''Custom metaclass to customise representation of dynamic classes.
+
+    .. note::
+
+        Derive from same metaclass as derived bases to avoid conflicts.
+
+    '''
+    def __repr__(self):
+        '''Return representation of class.'''
+        return '<dynamic ftrack class \'{0}\'>'.format(self.__name__)
+
+
+class Entity(with_metaclass(DynamicEntityTypeMetaclass, _EntityBase, collections.MutableMapping)):
+    '''Base class for all entities.'''
+
+    entity_type = 'Entity'
+    attributes = None
+    primary_key_attributes = None
+    default_projections = None
+
+    def __init__(self, session, data=None, reconstructing=False):
+        '''Initialise entity.
+
+        *session* is an instance of :class:`ftrack_api.session.Session` that
+        this entity instance is bound to.
+
+        *data* is a mapping of key, value pairs to apply as initial attribute
+        values.
+
+        *reconstructing* indicates whether this entity is being reconstructed,
+        such as from a query, and therefore should not have any special creation
+        logic applied, such as initialising defaults for missing data.
+
+        '''
+        super(Entity, self).__init__()
+        self.logger = logging.getLogger(
+            __name__ + '.' + self.__class__.__name__
+        )
+        self.session = session
+        self._inflated = set()
+
+        if data is None:
+            data = {}
+
+        self.logger.debug(L(
+            '{0} entity from {1!r}.',
+            ('Reconstructing' if reconstructing else 'Constructing'), data
+        ))
+
+        self._ignore_data_keys = ['__entity_type__']
+        if not reconstructing:
+            self._construct(data)
+        else:
+            self._reconstruct(data)
+
+    def _construct(self, data):
+        '''Construct from *data*.'''
+        # Suspend operation recording so that all modifications can be applied
+        # in a single create operation. In addition, recording a modification
+        # operation requires a primary key which may not be available yet.
+
+        relational_attributes = dict()
+
+        with self.session.operation_recording(False):
+            # Set defaults for any unset local attributes.
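# -- [editor's note, not part of the patch] -------------------------------
# Typical use of the AssetVersion helpers defined in asset_version.py above.
# A hedged sketch: assumes a reachable server; the query, names and paths
# are hypothetical.

import ftrack_api

session = ftrack_api.Session()
version = session.query('AssetVersion').first()

# Create a sequence component from a clique-style path and add it to a
# location picked by the session.
component = version.create_component(
    '/path/to/file.%04d.ext [1-5, 7, 8, 10-20]',
    data={'name': 'main'},
    location=session.pick_location()
)
session.commit()

# Request a browser-playable encode of some source media.
job = version.encode_media('/path/to/preview.mov')
# --------------------------------------------------------------------------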
+ for attribute in self.__class__.attributes: + if attribute.name not in data: + default_value = attribute.default_value + if callable(default_value): + default_value = default_value(self) + + attribute.set_local_value(self, default_value) + + + # Data represents locally set values. + for key, value in list(data.items()): + if key in self._ignore_data_keys: + continue + + attribute = self.__class__.attributes.get(key) + if attribute is None: + self.logger.debug(L( + 'Cannot populate {0!r} attribute as no such ' + 'attribute found on entity {1!r}.', key, self + )) + continue + + if not isinstance(attribute, ftrack_api.attribute.ScalarAttribute): + relational_attributes.setdefault( + attribute, value + ) + + else: + attribute.set_local_value(self, value) + + # Record create operation. + # Note: As this operation is recorded *before* any Session.merge takes + # place there is the possibility that the operation will hold references + # to outdated data in entity_data. However, this would be unusual in + # that it would mean the same new entity was created twice and only one + # altered. Conversely, if this operation were recorded *after* + # Session.merge took place, any cache would not be able to determine + # the status of the entity, which could be important if the cache should + # not store newly created entities that have not yet been persisted. Out + # of these two 'evils' this approach is deemed the lesser at this time. + # A third, more involved, approach to satisfy both might be to record + # the operation with a PENDING entity_data value and then update with + # merged values post merge. + if self.session.record_operations: + entity_data = {} + + # Lower level API used here to avoid including any empty + # collections that are automatically generated on access. + for attribute in self.attributes: + value = attribute.get_local_value(self) + if value is not ftrack_api.symbol.NOT_SET: + entity_data[attribute.name] = value + + self.session.recorded_operations.push( + ftrack_api.operation.CreateEntityOperation( + self.entity_type, + ftrack_api.inspection.primary_key(self), + entity_data + ) + ) + + for attribute, value in list(relational_attributes.items()): + # Finally we set values for "relational" attributes, we need + # to do this at the end in order to get the create operations + # in the correct order as the newly created attributes might + # contain references to the newly created entity. + + attribute.set_local_value( + self, value + ) + + def _reconstruct(self, data): + '''Reconstruct from *data*.''' + # Data represents remote values. 
+        for key, value in list(data.items()):
+            if key in self._ignore_data_keys:
+                continue
+
+            attribute = self.__class__.attributes.get(key)
+            if attribute is None:
+                self.logger.debug(L(
+                    'Cannot populate {0!r} attribute as no such attribute '
+                    'found on entity {1!r}.', key, self
+                ))
+                continue
+
+            attribute.set_remote_value(self, value)
+
+    def __repr__(self):
+        '''Return representation of instance.'''
+        return '<dynamic ftrack {0} object {1}>'.format(
+            self.__class__.__name__, id(self)
+        )
+
+    def __str__(self):
+        '''Return string representation of instance.'''
+        with self.session.auto_populating(False):
+            primary_key = ['Unknown']
+            try:
+                primary_key = list(ftrack_api.inspection.primary_key(self).values())
+            except KeyError:
+                pass
+
+        return '<{0}({1})>'.format(
+            self.__class__.__name__, ', '.join(primary_key)
+        )
+
+    def __hash__(self):
+        '''Return hash representing instance.'''
+        return hash(str(ftrack_api.inspection.identity(self)))
+
+    def __eq__(self, other):
+        '''Return whether *other* is equal to this instance.
+
+        .. note::
+
+            Equality is determined by both instances having the same identity.
+            Values of attributes are not considered.
+
+        '''
+        try:
+            return (
+                ftrack_api.inspection.identity(other)
+                == ftrack_api.inspection.identity(self)
+            )
+        except (AttributeError, KeyError):
+            return False
+
+    def __getitem__(self, key):
+        '''Return attribute value for *key*.'''
+        attribute = self.__class__.attributes.get(key)
+        if attribute is None:
+            raise KeyError(key)
+
+        return attribute.get_value(self)
+
+    def __setitem__(self, key, value):
+        '''Set attribute *value* for *key*.'''
+        attribute = self.__class__.attributes.get(key)
+        if attribute is None:
+            raise KeyError(key)
+
+        attribute.set_local_value(self, value)
+
+    def __delitem__(self, key):
+        '''Clear attribute value for *key*.
+
+        .. note::
+
+            Will not remove the attribute, but instead clear any local value
+            and revert to the last known server value.
+
+        '''
+        attribute = self.__class__.attributes.get(key)
+        attribute.set_local_value(self, ftrack_api.symbol.NOT_SET)
+
+    def __iter__(self):
+        '''Iterate over all attribute keys.'''
+        for attribute in self.__class__.attributes:
+            yield attribute.name
+
+    def __len__(self):
+        '''Return count of attributes.'''
+        return len(self.__class__.attributes)
+
+    def values(self):
+        '''Return list of values.'''
+        if self.session.auto_populate:
+            self._populate_unset_scalar_attributes()
+
+        return list(super(Entity, self).values())
+
+    def items(self):
+        '''Return list of tuples of (key, value) pairs.
+
+        .. note::
+
+            Will fetch all values from the server if not already fetched or set
+            locally.
+
+        '''
+        if self.session.auto_populate:
+            self._populate_unset_scalar_attributes()
+
+        return list(super(Entity, self).items())
+
+    def clear(self):
+        '''Reset all locally modified attribute values.'''
+        for attribute in self:
+            del self[attribute]
+
+    def merge(self, entity, merged=None):
+        '''Merge *entity* attribute values and other data into this entity.
+
+        Only merge values from *entity* that are not
+        :attr:`ftrack_api.symbol.NOT_SET`.
+
+        Return a list of changes made, with each change being a mapping with
+        the keys:
+
+        * type - Either 'remote_attribute', 'local_attribute' or 'property'.
+        * name - The name of the attribute / property modified.
+        * old_value - The previous value.
+        * new_value - The new merged value.
+ + ''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + log_message = 'Merged {type} "{name}": {old_value!r} -> {new_value!r}' + changes = [] + + # Attributes. + + # Prioritise by type so that scalar values are set first. This should + # guarantee that the attributes making up the identity of the entity + # are merged before merging any collections that may have references to + # this entity. + attributes = collections.deque() + for attribute in entity.attributes: + if isinstance(attribute, ftrack_api.attribute.ScalarAttribute): + attributes.appendleft(attribute) + else: + attributes.append(attribute) + + for other_attribute in attributes: + attribute = self.attributes.get(other_attribute.name) + + # Local attributes. + other_local_value = other_attribute.get_local_value(entity) + if other_local_value is not ftrack_api.symbol.NOT_SET: + local_value = attribute.get_local_value(self) + if local_value != other_local_value: + merged_local_value = self.session.merge( + other_local_value, merged=merged + ) + + attribute.set_local_value(self, merged_local_value) + changes.append({ + 'type': 'local_attribute', + 'name': attribute.name, + 'old_value': local_value, + 'new_value': merged_local_value + }) + log_debug and self.logger.debug( + log_message.format(**changes[-1]) + ) + + # Remote attributes. + other_remote_value = other_attribute.get_remote_value(entity) + if other_remote_value is not ftrack_api.symbol.NOT_SET: + remote_value = attribute.get_remote_value(self) + if remote_value != other_remote_value: + merged_remote_value = self.session.merge( + other_remote_value, merged=merged + ) + + attribute.set_remote_value( + self, merged_remote_value + ) + + changes.append({ + 'type': 'remote_attribute', + 'name': attribute.name, + 'old_value': remote_value, + 'new_value': merged_remote_value + }) + + log_debug and self.logger.debug( + log_message.format(**changes[-1]) + ) + + # We need to handle collections separately since + # they may store a local copy of the remote attribute + # even though it may not be modified. + if not isinstance( + attribute, ftrack_api.attribute.AbstractCollectionAttribute + ): + continue + + local_value = attribute.get_local_value( + self + ) + + # Populated but not modified, update it. 
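# -- [editor's note, not part of the patch] -------------------------------
# The mapping protocol implemented above means every entity behaves like a
# dictionary keyed by attribute name. Sketch, assuming the `session` from
# the earlier editorial example; the attribute values are hypothetical.

task = session.query('Task').first()

task['name'] = 'compositing'      # record a local value
print(task['status']['name'])     # auto-populates from the server on access
del task['name']                  # revert to the last known server value
# --------------------------------------------------------------------------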
+            if (
+                local_value is not ftrack_api.symbol.NOT_SET and
+                local_value == remote_value
+            ):
+                attribute.set_local_value(
+                    self, merged_remote_value
+                )
+                changes.append({
+                    'type': 'local_attribute',
+                    'name': attribute.name,
+                    'old_value': local_value,
+                    'new_value': merged_remote_value
+                })
+
+                log_debug and self.logger.debug(
+                    log_message.format(**changes[-1])
+                )
+
+        return changes
+
+    def _populate_unset_scalar_attributes(self):
+        '''Populate all unset scalar attributes in one query.'''
+        projections = []
+        for attribute in self.attributes:
+            if isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
+                if attribute.get_remote_value(self) is ftrack_api.symbol.NOT_SET:
+                    projections.append(attribute.name)
+
+        if projections:
+            self.session.populate([self], ', '.join(projections))
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/component.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/component.py
new file mode 100644
index 0000000000..c08ff7918b
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/component.py
@@ -0,0 +1,75 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2015 ftrack
+
+from builtins import object
+import ftrack_api.entity.base
+
+
+class Component(ftrack_api.entity.base.Entity):
+    '''Represent a component.'''
+
+    def get_availability(self, locations=None):
+        '''Return availability in *locations*.
+
+        If *locations* is None, all known locations will be checked.
+
+        Return a dictionary of {location_id: percentage_availability}.
+
+        '''
+        return self.session.get_component_availability(
+            self, locations=locations
+        )
+
+
+class CreateThumbnailMixin(object):
+    '''Mixin to add create_thumbnail method on entity class.'''
+
+    def create_thumbnail(self, path, data=None):
+        '''Set entity thumbnail from *path*.
+
+        Creates a thumbnail component in the ftrack.server location using
+        :meth:`Session.create_component
+        <ftrack_api.session.Session.create_component>`. The thumbnail component
+        will be created using *data* if specified. If no component name is
+        given, `thumbnail` will be used.
+
+        The file is expected to be of an appropriate size and valid file
+        type.
+
+        .. note::
+
+            A :meth:`Session.commit` will be
+            automatically issued.
+
+        '''
+        if data is None:
+            data = {}
+        if not data.get('name'):
+            data['name'] = 'thumbnail'
+
+        thumbnail_component = self.session.create_component(
+            path, data, location=None
+        )
+
+        origin_location = self.session.get(
+            'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
+        )
+        server_location = self.session.get(
+            'Location', ftrack_api.symbol.SERVER_LOCATION_ID
+        )
+        server_location.add_component(thumbnail_component, [origin_location])
+
+        # TODO: This commit can be avoided by reordering the operations in
+        # this method so that the component is transferred to ftrack.server
+        # after the thumbnail has been set.
+        #
+        # There is currently a bug in the API backend, causing the operations
+        # to *sometimes* be ordered wrongly, where the update occurs before
+        # the component has been created, causing an integrity error.
+        #
+        # Once this issue has been resolved, this commit can be removed and
+        # the update placed between component creation and registration.
+ self['thumbnail_id'] = thumbnail_component['id'] + self.session.commit() + + return thumbnail_component diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/factory.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/factory.py new file mode 100644 index 0000000000..ba1f086fa0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/factory.py @@ -0,0 +1,443 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from __future__ import absolute_import + + +from builtins import str +from builtins import object + +import logging +import uuid +import functools + +import ftrack_api.attribute +import ftrack_api.entity.base +import ftrack_api.entity.location +import ftrack_api.entity.component +import ftrack_api.entity.asset_version +import ftrack_api.entity.project_schema +import ftrack_api.entity.note +import ftrack_api.entity.job +import ftrack_api.entity.user +import ftrack_api.symbol +import ftrack_api.cache +from ftrack_api.logging import LazyLogMessage as L + + +class Factory(object): + '''Entity class factory.''' + + def __init__(self): + '''Initialise factory.''' + super(Factory, self).__init__() + self.logger = logging.getLogger( + __name__ + '.' + self.__class__.__name__ + ) + + def create(self, schema, bases=None): + '''Create and return entity class from *schema*. + + *bases* should be a list of bases to give the constructed class. If not + specified, default to :class:`ftrack_api.entity.base.Entity`. + + ''' + entity_type = schema['id'] + class_name = entity_type + + class_bases = bases + if class_bases is None: + class_bases = [ftrack_api.entity.base.Entity] + + class_namespace = dict() + + # Build attributes for class. + attributes = ftrack_api.attribute.Attributes() + immutable_properties = schema.get('immutable', []) + computed_properties = schema.get('computed', []) + for name, fragment in list(schema.get('properties', {}).items()): + mutable = name not in immutable_properties + computed = name in computed_properties + + default = fragment.get('default', ftrack_api.symbol.NOT_SET) + if default == '{uid}': + default = lambda instance: str(uuid.uuid4()) + + data_type = fragment.get('type', ftrack_api.symbol.NOT_SET) + + if data_type is not ftrack_api.symbol.NOT_SET: + + if data_type in ( + 'string', 'boolean', 'integer', 'number', 'variable', + 'object' + ): + # Basic scalar attribute. + if data_type == 'number': + data_type = 'float' + + if data_type == 'string': + data_format = fragment.get('format') + if data_format == 'date-time': + data_type = 'datetime' + + attribute = self.create_scalar_attribute( + class_name, name, mutable, computed, default, data_type + ) + if attribute: + attributes.add(attribute) + + elif data_type == 'array': + attribute = self.create_collection_attribute( + class_name, name, mutable + ) + if attribute: + attributes.add(attribute) + + elif data_type == 'mapped_array': + reference = fragment.get('items', {}).get('$ref') + if not reference: + self.logger.debug(L( + 'Skipping {0}.{1} mapped_array attribute that does ' + 'not define a schema reference.', class_name, name + )) + continue + + attribute = self.create_mapped_collection_attribute( + class_name, name, mutable, reference + ) + if attribute: + attributes.add(attribute) + + else: + self.logger.debug(L( + 'Skipping {0}.{1} attribute with unrecognised data ' + 'type {2}', class_name, name, data_type + )) + else: + # Reference attribute. 
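# -- [editor's note, not part of the patch] -------------------------------
# Using CreateThumbnailMixin.create_thumbnail defined in component.py above.
# The mixin is attached by the factory below to any schema exposing
# `thumbnail_id`; the path and `task` entity are hypothetical.

thumbnail_component = task.create_thumbnail('/path/to/image.jpg')
# --------------------------------------------------------------------------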
+ reference = fragment.get('$ref', ftrack_api.symbol.NOT_SET) + if reference is ftrack_api.symbol.NOT_SET: + self.logger.debug(L( + 'Skipping {0}.{1} mapped_array attribute that does ' + 'not define a schema reference.', class_name, name + )) + continue + + attribute = self.create_reference_attribute( + class_name, name, mutable, reference + ) + if attribute: + attributes.add(attribute) + + default_projections = schema.get('default_projections', []) + + # Construct class. + class_namespace['entity_type'] = entity_type + class_namespace['attributes'] = attributes + class_namespace['primary_key_attributes'] = schema['primary_key'][:] + class_namespace['default_projections'] = default_projections + + from future.utils import ( + native_str + ) + + cls = type( + native_str(class_name), # type doesn't accept unicode. + tuple(class_bases), + class_namespace + ) + + return cls + + def create_scalar_attribute( + self, class_name, name, mutable, computed, default, data_type + ): + '''Return appropriate scalar attribute instance.''' + return ftrack_api.attribute.ScalarAttribute( + name, data_type=data_type, default_value=default, mutable=mutable, + computed=computed + ) + + def create_reference_attribute(self, class_name, name, mutable, reference): + '''Return appropriate reference attribute instance.''' + return ftrack_api.attribute.ReferenceAttribute( + name, reference, mutable=mutable + ) + + def create_collection_attribute(self, class_name, name, mutable): + '''Return appropriate collection attribute instance.''' + return ftrack_api.attribute.CollectionAttribute( + name, mutable=mutable + ) + + def create_mapped_collection_attribute( + self, class_name, name, mutable, reference + ): + '''Return appropriate mapped collection attribute instance.''' + self.logger.debug(L( + 'Skipping {0}.{1} mapped_array attribute that has ' + 'no implementation defined for reference {2}.', + class_name, name, reference + )) + + +class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker): + '''Generate key for defaults.''' + + def _key(self, obj): + '''Return key for *obj*.''' + if isinstance(obj, dict): + entity = obj.get('entity') + if entity is not None: + # Key by session only. + return str(id(entity.session)) + + return str(obj) + + +#: Memoiser for use with default callables that should only be called once per +# session. +memoise_defaults = ftrack_api.cache.memoise_decorator( + ftrack_api.cache.Memoiser( + key_maker=PerSessionDefaultKeyMaker(), return_copies=False + ) +) + +#: Memoiser for use with callables that should be called once per session. +memoise_session = ftrack_api.cache.memoise_decorator( + ftrack_api.cache.Memoiser( + key_maker=PerSessionDefaultKeyMaker(), return_copies=False + ) +) + + +@memoise_session +def _get_custom_attribute_configurations(session): + '''Return list of custom attribute configurations. + + The configuration objects will have key, project_id, id and object_type_id + populated. 
+
+    '''
+    return session.query(
+        'select key, project_id, id, object_type_id, entity_type, '
+        'is_hierarchical from CustomAttributeConfiguration'
+    ).all()
+
+
+def _get_entity_configurations(entity):
+    '''Return all configurations for current collection entity.'''
+    entity_type = None
+    project_id = None
+    object_type_id = None
+
+    if 'object_type_id' in entity.keys():
+        project_id = entity['project_id']
+        entity_type = 'task'
+        object_type_id = entity['object_type_id']
+
+    if entity.entity_type == 'AssetVersion':
+        project_id = entity['asset']['parent']['project_id']
+        entity_type = 'assetversion'
+
+    if entity.entity_type == 'Project':
+        project_id = entity['id']
+        entity_type = 'show'
+
+    if entity.entity_type == 'User':
+        entity_type = 'user'
+
+    if entity.entity_type == 'Asset':
+        entity_type = 'asset'
+
+    if entity.entity_type in ('TypedContextList', 'AssetVersionList'):
+        entity_type = 'list'
+
+    if entity_type is None:
+        raise ValueError(
+            'Entity {!r} not supported.'.format(entity)
+        )
+
+    configurations = []
+    for configuration in _get_custom_attribute_configurations(
+        entity.session
+    ):
+        if (
+            configuration['entity_type'] == entity_type and
+            configuration['project_id'] in (project_id, None) and
+            configuration['object_type_id'] == object_type_id
+        ):
+            # The custom attribute configuration is for the target entity type.
+            configurations.append(configuration)
+        elif (
+            entity_type in ('asset', 'assetversion', 'show', 'task') and
+            configuration['project_id'] in (project_id, None) and
+            configuration['is_hierarchical']
+        ):
+            # The target entity type allows hierarchical attributes.
+            configurations.append(configuration)
+
+    # Return with global configurations at the end of the list. This is done
+    # so that global configurations are shadowed by project-specific ones when
+    # the configurations list is iterated while looking for a matching `key`.
+    return sorted(
+        configurations, key=lambda item: item['project_id'] is None
+    )
+
+
+class StandardFactory(Factory):
+    '''Standard entity class factory.'''
+
+    def create(self, schema, bases=None):
+        '''Create and return entity class from *schema*.'''
+        if not bases:
+            bases = []
+
+        extra_bases = []
+        # Customise classes.
+        if schema['id'] == 'ProjectSchema':
+            extra_bases = [ftrack_api.entity.project_schema.ProjectSchema]
+
+        elif schema['id'] == 'Location':
+            extra_bases = [ftrack_api.entity.location.Location]
+
+        elif schema['id'] == 'AssetVersion':
+            extra_bases = [ftrack_api.entity.asset_version.AssetVersion]
+
+        elif schema['id'].endswith('Component'):
+            extra_bases = [ftrack_api.entity.component.Component]
+
+        elif schema['id'] == 'Note':
+            extra_bases = [ftrack_api.entity.note.Note]
+
+        elif schema['id'] == 'Job':
+            extra_bases = [ftrack_api.entity.job.Job]
+
+        elif schema['id'] == 'User':
+            extra_bases = [ftrack_api.entity.user.User]
+
+        bases = extra_bases + bases
+
+        # If bases does not contain any items, add the base entity class.
+        if not bases:
+            bases = [ftrack_api.entity.base.Entity]
+
+        # Add mixins.
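# -- [editor's note, not part of the patch] -------------------------------
# StandardFactory can be subclassed to attach custom base classes to chosen
# schemas. A hedged sketch; MyUserExtensions and its helper are hypothetical.

class MyUserExtensions(object):
    '''Hypothetical project-specific helpers for User entities.'''

    def full_name(self):
        return '{0} {1}'.format(self['first_name'], self['last_name'])


class MyFactory(StandardFactory):
    '''Factory attaching the extra helpers to User entities.'''

    def create(self, schema, bases=None):
        if schema['id'] == 'User':
            bases = [MyUserExtensions]
        return super(MyFactory, self).create(schema, bases=bases)
# --------------------------------------------------------------------------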
+        if 'notes' in schema.get('properties', {}):
+            bases.append(
+                ftrack_api.entity.note.CreateNoteMixin
+            )
+
+        if 'thumbnail_id' in schema.get('properties', {}):
+            bases.append(
+                ftrack_api.entity.component.CreateThumbnailMixin
+            )
+
+        cls = super(StandardFactory, self).create(schema, bases=bases)
+
+        return cls
+
+    def create_mapped_collection_attribute(
+        self, class_name, name, mutable, reference
+    ):
+        '''Return appropriate mapped collection attribute instance.'''
+        if reference == 'Metadata':
+
+            def create_metadata(proxy, data, reference):
+                '''Return metadata for *data*.'''
+                entity = proxy.collection.entity
+                session = entity.session
+                data.update({
+                    'parent_id': entity['id'],
+                    'parent_type': entity.entity_type
+                })
+                return session.create(reference, data)
+
+            creator = functools.partial(
+                create_metadata, reference=reference
+            )
+            key_attribute = 'key'
+            value_attribute = 'value'
+
+            return ftrack_api.attribute.KeyValueMappedCollectionAttribute(
+                name, creator, key_attribute, value_attribute, mutable=mutable
+            )
+
+        elif reference == 'CustomAttributeValue':
+            return (
+                ftrack_api.attribute.CustomAttributeCollectionAttribute(
+                    name, mutable=mutable
+                )
+            )
+
+        elif reference.endswith('CustomAttributeValue'):
+            def creator(proxy, data):
+                '''Create a custom attribute based on *proxy* and *data*.
+
+                Raise :py:exc:`KeyError` if the related entity is already
+                persisted to the server. The proxy represents dense custom
+                attribute values and should never create new custom attribute
+                values through the proxy if the entity exists on the remote.
+
+                If the entity is not persisted, the usual
+                CustomAttributeValue items cannot be updated as
+                the related entity does not exist on the remote and the values
+                are not in the proxy. Instead a CustomAttributeValue will
+                be reconstructed and an update operation will be recorded.
+
+                '''
+                entity = proxy.collection.entity
+                if (
+                    ftrack_api.inspection.state(entity) is not
+                    ftrack_api.symbol.CREATED
+                ):
+                    raise KeyError(
+                        'Custom attributes must be created explicitly for the '
+                        'given entity type before being set.'
+                    )
+
+                configuration = None
+                for candidate in _get_entity_configurations(entity):
+                    if candidate['key'] == data['key']:
+                        configuration = candidate
+                        break
+
+                if configuration is None:
+                    raise ValueError(
+                        u'No valid custom attribute for data {0!r} was found.'
+                        .format(data)
+                    )
+
+                create_data = dict(list(data.items()))
+                create_data['configuration_id'] = configuration['id']
+                create_data['entity_id'] = entity['id']
+
+                session = entity.session
+
+                # Create the custom attribute by reconstructing it and updating
+                # the value. This prevents a create operation from being sent
+                # to the remote, as create operations for this entity type are
+                # not allowed. Instead an update operation will be recorded.
+                value = create_data.pop('value')
+                item = session.create(
+                    reference,
+                    create_data,
+                    reconstructing=True
+                )
+
+                # Record update operation.
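# -- [editor's note, not part of the patch] -------------------------------
# The creator above backs the `custom_attributes` mapped collection: for a
# newly created (not yet persisted) entity, assigning a key reconstructs the
# CustomAttributeValue and records an update instead of a create. A hedged
# sketch; `project` and the 'fstart' configuration are hypothetical.

shot = session.create('Shot', {'name': 'sh010', 'parent': project})
shot['custom_attributes']['fstart'] = 1001
session.commit()
# --------------------------------------------------------------------------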
+ item['value'] = value + + return item + + key_attribute = 'key' + value_attribute = 'value' + + return ftrack_api.attribute.KeyValueMappedCollectionAttribute( + name, creator, key_attribute, value_attribute, mutable=mutable + ) + + self.logger.debug(L( + 'Skipping {0}.{1} mapped_array attribute that has no configuration ' + 'for reference {2}.', class_name, name, reference + )) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/job.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/job.py new file mode 100644 index 0000000000..ae37922c51 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/job.py @@ -0,0 +1,48 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +import ftrack_api.entity.base + + +class Job(ftrack_api.entity.base.Entity): + '''Represent job.''' + + def __init__(self, session, data=None, reconstructing=False): + '''Initialise entity. + + *session* is an instance of :class:`ftrack_api.session.Session` that + this entity instance is bound to. + + *data* is a mapping of key, value pairs to apply as initial attribute + values. + + To set a job `description` visible in the web interface, *data* can + contain a key called `data` which should be a JSON serialised + dictionary containing description:: + + data = { + 'status': 'running', + 'data': json.dumps(dict(description='My job description.')), + ... + } + + Will raise a :py:exc:`ValueError` if *data* contains `type` and `type` + is set to something not equal to "api_job". + + *reconstructing* indicates whether this entity is being reconstructed, + such as from a query, and therefore should not have any special creation + logic applied, such as initialising defaults for missing data. + + ''' + + if not reconstructing: + if data.get('type') not in ('api_job', None): + raise ValueError( + 'Invalid job type "{0}". Must be "api_job"'.format( + data.get('type') + ) + ) + + super(Job, self).__init__( + session, data=data, reconstructing=reconstructing + ) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/location.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/location.py new file mode 100644 index 0000000000..1c81bdc789 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/location.py @@ -0,0 +1,745 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +from builtins import zip +from six import string_types +from builtins import object +import collections +import functools + +import ftrack_api.entity.base +import ftrack_api.exception +import ftrack_api.event.base +import ftrack_api.symbol +import ftrack_api.inspection +from ftrack_api.logging import LazyLogMessage as L + +from future.utils import with_metaclass + + +MixinBaseClass = with_metaclass( + ftrack_api.entity.base.DynamicEntityTypeMetaclass, + ftrack_api.entity.base._EntityBase, + collections.MutableMapping +) + + +class Location(ftrack_api.entity.base.Entity): + '''Represent storage for components.''' + + def __init__(self, session, data=None, reconstructing=False): + '''Initialise entity. + + *session* is an instance of :class:`ftrack_api.session.Session` that + this entity instance is bound to. + + *data* is a mapping of key, value pairs to apply as initial attribute + values. + + *reconstructing* indicates whether this entity is being reconstructed, + such as from a query, and therefore should not have any special creation + logic applied, such as initialising defaults for missing data. 
+ + ''' + self.accessor = ftrack_api.symbol.NOT_SET + self.structure = ftrack_api.symbol.NOT_SET + self.resource_identifier_transformer = ftrack_api.symbol.NOT_SET + self.priority = 95 + super(Location, self).__init__( + session, data=data, reconstructing=reconstructing + ) + + def __str__(self): + '''Return string representation of instance.''' + representation = super(Location, self).__str__() + + with self.session.auto_populating(False): + name = self['name'] + if name is not ftrack_api.symbol.NOT_SET: + representation = representation.replace( + '(', '("{0}", '.format(name) + ) + + return representation + + def add_component(self, component, source, recursive=True): + '''Add *component* to location. + + *component* should be a single component instance. + + *source* should be an instance of another location that acts as the + source. + + Raise :exc:`ftrack_api.ComponentInLocationError` if the *component* + already exists in this location. + + Raise :exc:`ftrack_api.LocationError` if managing data and the generated + target structure for the component already exists according to the + accessor. This helps prevent potential data loss by avoiding overwriting + existing data. Note that there is a race condition between the check and + the write so if another process creates data at the same target during + that period it will be overwritten. + + .. note:: + + A :meth:`Session.commit` may be + automatically issued as part of the component registration. + + ''' + return self.add_components( + [component], sources=source, recursive=recursive + ) + + def add_components(self, components, sources, recursive=True, _depth=0): + '''Add *components* to location. + + *components* should be a list of component instances. + + *sources* may be either a single source or a list of sources. If a list + then each corresponding index in *sources* will be used for each + *component*. A source should be an instance of another location. + + Raise :exc:`ftrack_api.exception.ComponentInLocationError` if any + component in *components* already exists in this location. In this case, + no changes will be made and no data transferred. + + Raise :exc:`ftrack_api.exception.LocationError` if managing data and the + generated target structure for the component already exists according to + the accessor. This helps prevent potential data loss by avoiding + overwriting existing data. Note that there is a race condition between + the check and the write so if another process creates data at the same + target during that period it will be overwritten. + + .. note:: + + A :meth:`Session.commit` may be + automatically issued as part of the components registration. + + .. important:: + + If this location manages data then the *components* data is first + transferred to the target prescribed by the structure plugin, using + the configured accessor. If any component fails to transfer then + :exc:`ftrack_api.exception.LocationError` is raised and none of the + components are registered with the database. In this case it is left + up to the caller to decide and act on manually cleaning up any + transferred data using the 'transferred' detail in the raised error. + + Likewise, after transfer, all components are registered with the + database in a batch call. If any component causes an error then all + components will remain unregistered and + :exc:`ftrack_api.exception.LocationError` will be raised detailing + issues and any transferred data under the 'transferred' detail key. 
+ + ''' + if ( + isinstance(sources, string_types) + or not isinstance(sources, collections.Sequence) + ): + sources = [sources] + + sources_count = len(sources) + if sources_count not in (1, len(components)): + raise ValueError( + 'sources must be either a single source or a sequence of ' + 'sources with indexes corresponding to passed components.' + ) + + if not self.structure: + raise ftrack_api.exception.LocationError( + 'No structure defined for location {location}.', + details=dict(location=self) + ) + + if not components: + # Optimisation: Return early when no components to process, such as + # when called recursively on an empty sequence component. + return + + indent = ' ' * (_depth + 1) + + # Check that components not already added to location. + existing_components = [] + try: + self.get_resource_identifiers(components) + + except ftrack_api.exception.ComponentNotInLocationError as error: + missing_component_ids = [ + missing_component['id'] + for missing_component in error.details['components'] + ] + for component in components: + if component['id'] not in missing_component_ids: + existing_components.append(component) + + else: + existing_components.extend(components) + + if existing_components: + # Some of the components already present in location. + raise ftrack_api.exception.ComponentInLocationError( + existing_components, self + ) + + # Attempt to transfer each component's data to this location. + transferred = [] + + for index, component in enumerate(components): + try: + # Determine appropriate source. + if sources_count == 1: + source = sources[0] + else: + source = sources[index] + + # Add members first for container components. + is_container = 'members' in list(component.keys()) + if is_container and recursive: + self.add_components( + component['members'], source, recursive=recursive, + _depth=(_depth + 1) + ) + + # Add component to this location. + context = self._get_context(component, source) + resource_identifier = self.structure.get_resource_identifier( + component, context + ) + + # Manage data transfer. + self._add_data(component, resource_identifier, source) + + except Exception as error: + raise ftrack_api.exception.LocationError( + 'Failed to transfer component {component} data to location ' + '{location} due to error:\n{indent}{error}\n{indent}' + 'Transferred component data that may require cleanup: ' + '{transferred}', + details=dict( + indent=indent, + component=component, + location=self, + error=error, + transferred=transferred + ) + ) + + else: + transferred.append((component, resource_identifier)) + + # Register all successfully transferred components. + components_to_register = [] + component_resource_identifiers = [] + + try: + for component, resource_identifier in transferred: + if self.resource_identifier_transformer: + # Optionally encode resource identifier before storing. + resource_identifier = ( + self.resource_identifier_transformer.encode( + resource_identifier, + context={'component': component} + ) + ) + + components_to_register.append(component) + component_resource_identifiers.append(resource_identifier) + + # Store component in location information. 
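# -- [editor's note, not part of the patch] -------------------------------
# Exercising the transfer logic above through the public add_component
# helper. Assumes `component` currently has data in the origin location and
# `target_location` is a managed location with an accessor and structure.

origin = session.query('Location where name is "ftrack.origin"').one()
target_location.add_component(component, origin)
# --------------------------------------------------------------------------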
+            self._register_components_in_location(
+                components, component_resource_identifiers
+            )
+
+        except Exception as error:
+            raise ftrack_api.exception.LocationError(
+                'Failed to register components with location {location} due to '
+                'error:\n{indent}{error}\n{indent}Transferred component data '
+                'that may require cleanup: {transferred}',
+                details=dict(
+                    indent=indent,
+                    location=self,
+                    error=error,
+                    transferred=transferred
+                )
+            )
+
+        # Publish events.
+        for component in components_to_register:
+
+            component_id = list(ftrack_api.inspection.primary_key(
+                component
+            ).values())[0]
+            location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
+
+            self.session.event_hub.publish(
+                ftrack_api.event.base.Event(
+                    topic=ftrack_api.symbol.COMPONENT_ADDED_TO_LOCATION_TOPIC,
+                    data=dict(
+                        component_id=component_id,
+                        location_id=location_id
+                    ),
+                ),
+                on_error='ignore'
+            )
+
+    def _get_context(self, component, source):
+        '''Return context for *component* and *source*.'''
+        context = {}
+        if source:
+            try:
+                source_resource_identifier = source.get_resource_identifier(
+                    component
+                )
+            except ftrack_api.exception.ComponentNotInLocationError:
+                pass
+            else:
+                context.update(dict(
+                    source_resource_identifier=source_resource_identifier
+                ))
+
+        return context
+
+    def _add_data(self, component, resource_identifier, source):
+        '''Manage transfer of *component* data from *source*.
+
+        *resource_identifier* specifies the identifier to use with this
+        location's accessor.
+
+        '''
+        self.logger.debug(L(
+            'Adding data for component {0!r} from source {1!r} to location '
+            '{2!r} using resource identifier {3!r}.',
+            component, resource_identifier, source, self
+        ))
+
+        # Read data from source and write to this location.
+        if not source.accessor:
+            raise ftrack_api.exception.LocationError(
+                'No accessor defined for source location {location}.',
+                details=dict(location=source)
+            )
+
+        if not self.accessor:
+            raise ftrack_api.exception.LocationError(
+                'No accessor defined for target location {location}.',
+                details=dict(location=self)
+            )
+
+        is_container = 'members' in list(component.keys())
+        if is_container:
+            # TODO: Improve this check. Possibly introduce an inspection
+            # such as ftrack_api.inspection.is_sequence_component.
+            if component.entity_type != 'SequenceComponent':
+                self.accessor.make_container(resource_identifier)
+
+        else:
+            # Try to make container of component.
+            try:
+                container = self.accessor.get_container(
+                    resource_identifier
+                )
+
+            except ftrack_api.exception.AccessorParentResourceNotFoundError:
+                # Container could not be retrieved from
+                # resource_identifier. Assume that there is no need to
+                # make the container.
+                pass
+
+            else:
+                # No need for existence check as make_container does not
+                # recreate existing containers.
+                self.accessor.make_container(container)
+
+            if self.accessor.exists(resource_identifier):
+                # Note: There is a race condition here in that the
+                # data may be added externally between the check for
+                # existence and the actual write which would still
+                # result in potential data loss. However, there is no
+                # good cross platform, cross accessor solution for this
+                # at present.
+                raise ftrack_api.exception.LocationError(
+                    'Cannot add component as data already exists and '
+                    'overwriting could result in data loss. Computed '
+                    'target resource identifier was: {0}'
+                    .format(resource_identifier)
+                )
+
+        # Read and write data.
+ source_data = source.accessor.open( + source.get_resource_identifier(component), 'rb' + ) + target_data = self.accessor.open(resource_identifier, 'wb') + + # Read/write data in chunks to avoid reading all into memory at the + # same time. + chunked_read = functools.partial( + source_data.read, ftrack_api.symbol.CHUNK_SIZE + ) + for chunk in iter(chunked_read, b''): + target_data.write(chunk) + + target_data.close() + source_data.close() + + def _register_component_in_location(self, component, resource_identifier): + '''Register *component* in location against *resource_identifier*.''' + return self._register_components_in_location( + [component], [resource_identifier] + ) + + def _register_components_in_location( + self, components, resource_identifiers + ): + '''Register *components* in location against *resource_identifiers*. + + Indices of *components* and *resource_identifiers* should align. + + ''' + for component, resource_identifier in zip( + components, resource_identifiers + ): + self.session.create( + 'ComponentLocation', data=dict( + component=component, + location=self, + resource_identifier=resource_identifier + ) + ) + + self.session.commit() + + def remove_component(self, component, recursive=True): + '''Remove *component* from location. + + .. note:: + + A :meth:`Session.commit` may be + automatically issued as part of the component deregistration. + + ''' + return self.remove_components([component], recursive=recursive) + + def remove_components(self, components, recursive=True): + '''Remove *components* from location. + + .. note:: + + A :meth:`Session.commit` may be + automatically issued as part of the components deregistration. + + ''' + for component in components: + # Check component is in this location + self.get_resource_identifier(component) + + # Remove members first for container components. + is_container = 'members' in list(component.keys()) + if is_container and recursive: + self.remove_components( + component['members'], recursive=recursive + ) + + # Remove data. + self._remove_data(component) + + # Remove metadata. + self._deregister_component_in_location(component) + + # Emit event. + component_id = list(ftrack_api.inspection.primary_key( + component + ).values())[0] + location_id = list(ftrack_api.inspection.primary_key(self).values())[0] + self.session.event_hub.publish( + ftrack_api.event.base.Event( + topic=ftrack_api.symbol.COMPONENT_REMOVED_FROM_LOCATION_TOPIC, + data=dict( + component_id=component_id, + location_id=location_id + ) + ), + on_error='ignore' + ) + + def _remove_data(self, component): + '''Remove data associated with *component*.''' + if not self.accessor: + raise ftrack_api.exception.LocationError( + 'No accessor defined for location {location}.', + details=dict(location=self) + ) + + try: + self.accessor.remove( + self.get_resource_identifier(component) + ) + except ftrack_api.exception.AccessorResourceNotFoundError: + # If accessor does not support detecting sequence paths then an + # AccessorResourceNotFoundError is raised. For now, if the + # component type is 'SequenceComponent' assume success. + if not component.entity_type == 'SequenceComponent': + raise + + def _deregister_component_in_location(self, component): + '''Deregister *component* from location.''' + component_id = list(ftrack_api.inspection.primary_key(component).values())[0] + location_id = list(ftrack_api.inspection.primary_key(self).values())[0] + + # TODO: Use session.get for optimisation. 
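# -- [editor's note, not part of the patch] -------------------------------
# The copy loop above uses the two-argument form of iter(), which calls
# `chunked_read` repeatedly until the b'' sentinel is returned. The same
# pattern in isolation, with hypothetical file names:

with open('source.bin', 'rb') as src, open('target.bin', 'wb') as dst:
    for chunk in iter(lambda: src.read(1024 * 1024), b''):
        dst.write(chunk)
# --------------------------------------------------------------------------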
+ component_location = self.session.query( + 'ComponentLocation where component_id is {0} and location_id is ' + '{1}'.format(component_id, location_id) + )[0] + + self.session.delete(component_location) + + # TODO: Should auto-commit here be optional? + self.session.commit() + + def get_component_availability(self, component): + '''Return availability of *component* in this location as a float.''' + return self.session.get_component_availability( + component, locations=[self] + )[self['id']] + + def get_component_availabilities(self, components): + '''Return availabilities of *components* in this location. + + Return list of float values corresponding to each component. + + ''' + return [ + availability[self['id']] for availability in + self.session.get_component_availabilities( + components, locations=[self] + ) + ] + + def get_resource_identifier(self, component): + '''Return resource identifier for *component*. + + Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if the + component is not present in this location. + + ''' + return self.get_resource_identifiers([component])[0] + + def get_resource_identifiers(self, components): + '''Return resource identifiers for *components*. + + Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any + of the components are not present in this location. + + ''' + resource_identifiers = self._get_resource_identifiers(components) + + # Optionally decode resource identifier. + if self.resource_identifier_transformer: + for index, resource_identifier in enumerate(resource_identifiers): + resource_identifiers[index] = ( + self.resource_identifier_transformer.decode( + resource_identifier, + context={'component': components[index]} + ) + ) + + return resource_identifiers + + def _get_resource_identifiers(self, components): + '''Return resource identifiers for *components*. + + Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any + of the components are not present in this location. 
+ + ''' + component_ids_mapping = collections.OrderedDict() + for component in components: + component_id = list(ftrack_api.inspection.primary_key( + component + ).values())[0] + component_ids_mapping[component_id] = component + + component_locations = self.session.query( + 'select component_id, resource_identifier from ComponentLocation ' + 'where location_id is {0} and component_id in ({1})' + .format( + list(ftrack_api.inspection.primary_key(self).values())[0], + ', '.join(list(component_ids_mapping.keys())) + ) + ) + + resource_identifiers_map = {} + for component_location in component_locations: + resource_identifiers_map[component_location['component_id']] = ( + component_location['resource_identifier'] + ) + + resource_identifiers = [] + missing = [] + for component_id, component in list(component_ids_mapping.items()): + if component_id not in resource_identifiers_map: + missing.append(component) + else: + resource_identifiers.append( + resource_identifiers_map[component_id] + ) + + if missing: + raise ftrack_api.exception.ComponentNotInLocationError( + missing, self + ) + + return resource_identifiers + + def get_filesystem_path(self, component): + '''Return filesystem path for *component*.''' + return self.get_filesystem_paths([component])[0] + + def get_filesystem_paths(self, components): + '''Return filesystem paths for *components*.''' + resource_identifiers = self.get_resource_identifiers(components) + + filesystem_paths = [] + for resource_identifier in resource_identifiers: + filesystem_paths.append( + self.accessor.get_filesystem_path(resource_identifier) + ) + + return filesystem_paths + + def get_url(self, component): + '''Return url for *component*. + + Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if + URL could not be determined from *component* or + :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if + retrieving URL is not supported by the location's accessor. + ''' + resource_identifier = self.get_resource_identifier(component) + + return self.accessor.get_url(resource_identifier) + + +class MemoryLocationMixin(MixinBaseClass): + '''Represent storage for components. + + Unlike a standard location, only store metadata for components in this + location in memory rather than persisting to the database. + + ''' + + @property + def _cache(self): + '''Return cache.''' + try: + cache = self.__cache + except AttributeError: + cache = self.__cache = {} + + return cache + + def _register_component_in_location(self, component, resource_identifier): + '''Register *component* in location with *resource_identifier*.''' + component_id = list(ftrack_api.inspection.primary_key(component).values())[0] + self._cache[component_id] = resource_identifier + + def _register_components_in_location( + self, components, resource_identifiers + ): + '''Register *components* in location against *resource_identifiers*. + + Indices of *components* and *resource_identifiers* should align. + + ''' + for component, resource_identifier in zip( + components, resource_identifiers + ): + self._register_component_in_location(component, resource_identifier) + + def _deregister_component_in_location(self, component): + '''Deregister *component* in location.''' + component_id = list(ftrack_api.inspection.primary_key(component).values())[0] + self._cache.pop(component_id) + + def _get_resource_identifiers(self, components): + '''Return resource identifiers for *components*. 
+
+        Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
+        of the referenced components are not present in this location.
+
+        '''
+        resource_identifiers = []
+        missing = []
+        for component in components:
+            component_id = list(ftrack_api.inspection.primary_key(
+                component
+            ).values())[0]
+            resource_identifier = self._cache.get(component_id)
+            if resource_identifier is None:
+                missing.append(component)
+            else:
+                resource_identifiers.append(resource_identifier)
+
+        if missing:
+            raise ftrack_api.exception.ComponentNotInLocationError(
+                missing, self
+            )
+
+        return resource_identifiers
+
+
+class UnmanagedLocationMixin(MixinBaseClass):
+    '''Location that does not manage data.'''
+
+    def _add_data(self, component, resource_identifier, source):
+        '''Manage transfer of *component* data from *source*.
+
+        *resource_identifier* specifies the identifier to use with this
+        location's accessor.
+
+        Overridden to have no effect.
+
+        '''
+        return
+
+    def _remove_data(self, component):
+        '''Remove data associated with *component*.
+
+        Overridden to have no effect.
+
+        '''
+        return
+
+
+class OriginLocationMixin(MemoryLocationMixin, UnmanagedLocationMixin):
+    '''Special origin location that expects sources as filepaths.'''
+
+    def _get_context(self, component, source):
+        '''Return context for *component* and *source*.'''
+        context = {}
+        if source:
+            context.update(dict(
+                source_resource_identifier=source
+            ))
+
+        return context
+
+
+class ServerLocationMixin(MixinBaseClass):
+    '''Location representing the ftrack server.
+
+    Adds convenience methods to location, specific to the ftrack server.
+    '''
+    def get_thumbnail_url(self, component, size=None):
+        '''Return thumbnail url for *component*.
+
+        Optionally, specify *size* to constrain the downscaled image to size
+        x size pixels.
+
+        Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
+        URL could not be determined from *resource_identifier* or
+        :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
+        retrieving URL is not supported by the location's accessor.
+        '''
+        resource_identifier = self.get_resource_identifier(component)
+        return self.accessor.get_thumbnail_url(resource_identifier, size)
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/note.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/note.py
new file mode 100644
index 0000000000..5fadbd9ad8
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/note.py
@@ -0,0 +1,105 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2015 ftrack
+
+import warnings
+from builtins import object
+import ftrack_api.entity.base
+
+
+class Note(ftrack_api.entity.base.Entity):
+    '''Represent a note.'''
+
+    def create_reply(
+        self, content, author
+    ):
+        '''Create a reply with *content* and *author*.
+
+        .. note::
+
+            This is a helper method. To create replies manually use the
+            standard :meth:`Session.create` method.
+
+        '''
+        reply = self.session.create(
+            'Note', {
+                'author': author,
+                'content': content
+            }
+        )
+
+        self['replies'].append(reply)
+
+        return reply
+
+
+class CreateNoteMixin(object):
+    '''Mixin to add create_note method on entity class.'''
+
+    def create_note(
+        self, content, author, recipients=None, category=None, labels=None
+    ):
+        '''Create note with *content* and *author*.
+
+        NoteLabels can be set by including *labels*.
+
+        Note category can be set by including *category*.
+
+        *recipients* can be specified as a list of user or group instances.
+
+        '''
+        note_label_support = 'NoteLabel' in self.session.types
+
+        if not labels:
+            labels = []
+
+        if labels and not note_label_support:
+            raise ValueError(
+                'NoteLabel is not supported by the current server version.'
+            )
+
+        if category and labels:
+            raise ValueError(
+                'category and labels cannot both be set at the same time.'
+            )
+
+        if not recipients:
+            recipients = []
+
+        data = {
+            'content': content,
+            'author': author
+        }
+
+        if category:
+            if note_label_support:
+                labels = [category]
+                warnings.warn(
+                    'category argument will be removed in an upcoming version, '
+                    'please use labels instead.',
+                    PendingDeprecationWarning
+                )
+            else:
+                data['category_id'] = category['id']
+
+        note = self.session.create('Note', data)
+
+        self['notes'].append(note)
+
+        for resource in recipients:
+            recipient = self.session.create('Recipient', {
+                'note_id': note['id'],
+                'resource_id': resource['id']
+            })
+
+            note['recipients'].append(recipient)
+
+        for label in labels:
+            self.session.create(
+                'NoteLabelLink',
+                {
+                    'label_id': label['id'],
+                    'note_id': note['id']
+                }
+            )
+
+        return note
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/project_schema.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/project_schema.py
new file mode 100644
index 0000000000..ec6db7c019
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/project_schema.py
@@ -0,0 +1,94 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2015 ftrack
+
+import ftrack_api.entity.base
+
+
+class ProjectSchema(ftrack_api.entity.base.Entity):
+    '''Class representing ProjectSchema.'''
+
+    def get_statuses(self, schema, type_id=None):
+        '''Return statuses for *schema* and optional *type_id*.
+
+        *type_id* is the id of the Type for a TypedContext and can be used to
+        get statuses where the workflow has been overridden.
+
+        '''
+        # Task has overrides and needs to be handled separately.
+        if schema == 'Task':
+            if type_id is not None:
+                overrides = self['_overrides']
+                for override in overrides:
+                    if override['type_id'] == type_id:
+                        return override['workflow_schema']['statuses'][:]
+
+            return self['_task_workflow']['statuses'][:]
+
+        elif schema == 'AssetVersion':
+            return self['_version_workflow']['statuses'][:]
+
+        else:
+            try:
+                EntityTypeClass = self.session.types[schema]
+            except KeyError:
+                raise ValueError('Schema {0} does not exist.'.format(schema))
+
+            object_type_id_attribute = EntityTypeClass.attributes.get(
+                'object_type_id'
+            )
+
+            try:
+                object_type_id = object_type_id_attribute.default_value
+            except AttributeError:
+                raise ValueError(
+                    'Schema {0} does not have statuses.'.format(schema)
+                )
+
+            for _schema in self['_schemas']:
+                if _schema['type_id'] == object_type_id:
+                    result = self.session.query(
+                        'select task_status from SchemaStatus '
+                        'where schema_id is {0}'.format(_schema['id'])
+                    )
+                    return [
+                        schema_type['task_status'] for schema_type in result
+                    ]
+
+            raise ValueError(
+                'No valid statuses were found for schema {0}.'.format(schema)
+            )
+
+    def get_types(self, schema):
+        '''Return types for *schema*.'''
+        # Task needs to be handled separately.
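# -- [editor's note, not part of the patch] -------------------------------
# Using CreateNoteMixin.create_note defined in note.py above. Assumes a
# queried `task` entity; the username is hypothetical.

author = session.query('User where username is "jane"').one()
note = task.create_note('Looks good, approved.', author=author)
session.commit()
# --------------------------------------------------------------------------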
+        if schema == 'Task':
+            return self['_task_type_schema']['types'][:]
+
+        else:
+            try:
+                EntityTypeClass = self.session.types[schema]
+            except KeyError:
+                raise ValueError('Schema {0} does not exist.'.format(schema))
+
+            object_type_id_attribute = EntityTypeClass.attributes.get(
+                'object_type_id'
+            )
+
+            try:
+                object_type_id = object_type_id_attribute.default_value
+            except AttributeError:
+                raise ValueError(
+                    'Schema {0} does not have types.'.format(schema)
+                )
+
+            for _schema in self['_schemas']:
+                if _schema['type_id'] == object_type_id:
+                    result = self.session.query(
+                        'select task_type from SchemaType '
+                        'where schema_id is {0}'.format(_schema['id'])
+                    )
+                    return [schema_type['task_type'] for schema_type in result]
+
+            raise ValueError(
+                'No valid types were found for schema {0}.'.format(schema)
+            )
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/entity/user.py b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/user.py
new file mode 100644
index 0000000000..49318f8650
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/entity/user.py
@@ -0,0 +1,124 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2015 ftrack
+
+from builtins import str
+import arrow
+
+import ftrack_api.entity.base
+import ftrack_api.exception
+
+
+class User(ftrack_api.entity.base.Entity):
+    '''Represent a user.'''
+
+    def start_timer(self, context=None, comment='', name=None, force=False):
+        '''Start a timer for *context* and return it.
+
+        *force* can be used to automatically stop an existing timer and create a
+        timelog for it. If you need to get access to the created timelog, use
+        :func:`stop_timer` instead.
+
+        *comment* and *name* are optional but will be set on the timer.
+
+        .. note::
+
+            This method will automatically commit the changes and if *force* is
+            False then it will fail with a
+            :class:`ftrack_api.exception.NotUniqueError` exception if a
+            timer is already running.
+
+        '''
+        if force:
+            try:
+                self.stop_timer()
+            except ftrack_api.exception.NoResultFoundError:
+                self.logger.debug('Failed to stop existing timer.')
+
+        timer = self.session.create('Timer', {
+            'user': self,
+            'context': context,
+            'name': name,
+            'comment': comment
+        })
+
+        # Commit the new timer and try to catch any error that indicates
+        # another timelog already exists and inform the user about it.
+        try:
+            self.session.commit()
+        except ftrack_api.exception.ServerError as error:
+            if 'IntegrityError' in str(error):
+                raise ftrack_api.exception.NotUniqueError(
+                    ('Failed to start a timelog for user with id: {0}; it is '
+                     'likely that a timer is already running. Either use '
+                     'force=True or stop the timer first.').format(self['id'])
+                )
+            else:
+                # Reraise the error as it might be something unrelated.
+                raise
+
+        return timer
+
+    def stop_timer(self):
+        '''Stop the current timer and return a timelog created from it.
+
+        If a timer is not running, a
+        :exc:`ftrack_api.exception.NoResultFoundError` exception will be
+        raised.
+
+        .. note::
+
+            This method will automatically commit the changes.
+
+        '''
+        timer = self.session.query(
+            'Timer where user_id = "{0}"'.format(self['id'])
+        ).one()
+
+        # If the server is running in the same timezone as the local
+        # timezone, we remove the TZ offset to get the correct duration.
+        is_timezone_support_enabled = self.session.server_information.get(
+            'is_timezone_support_enabled', None
+        )
+        if is_timezone_support_enabled is None:
+            self.logger.warning(
+                'Could not identify if server has timezone support enabled. '
+                'Will assume server is running in UTC.'
+            )
+            is_timezone_support_enabled = True
+
+        if is_timezone_support_enabled:
+            now = arrow.now()
+        else:
+            now = arrow.now().replace(tzinfo='utc')
+
+        delta = now - timer['start']
+        duration = delta.days * 24 * 60 * 60 + delta.seconds
+
+        timelog = self.session.create('Timelog', {
+            'user_id': timer['user_id'],
+            'context_id': timer['context_id'],
+            'comment': timer['comment'],
+            'start': timer['start'],
+            'duration': duration,
+            'name': timer['name']
+        })
+
+        self.session.delete(timer)
+        self.session.commit()
+
+        return timelog
+
+    def send_invite(self):
+        '''Send an invitation email to the user.'''
+
+        self.session.send_user_invite(
+            self
+        )
+
+    def reset_api_key(self):
+        '''Reset the user's API key.'''
+
+        response = self.session.reset_remote(
+            'api_key', entity=self
+        )
+
+        return response['api_key']
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/__init__.py
new file mode 100644
index 0000000000..1aab07ed77
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/__init__.py
@@ -0,0 +1,2 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/base.py
new file mode 100644
index 0000000000..f6f0a916c7
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/base.py
@@ -0,0 +1,86 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+from builtins import str
+import uuid
+import collections
+
+
+class Event(collections.MutableMapping):
+    '''Represent a single event.'''
+
+    def __init__(self, topic, id=None, data=None, sent=None,
+                 source=None, target='', in_reply_to_event=None):
+        '''Initialise event.
+
+        *topic* is the required topic for the event. It can use a dotted
+        notation to demarcate groupings. For example, 'ftrack.update'.
+
+        *id* is the unique id for this event instance. It is primarily used
+        when replying to an event. If not supplied, a default uuid based value
+        will be used.
+
+        *data* refers to event specific data. It should be a mapping structure
+        and defaults to an empty dictionary if not supplied.
+
+        *sent* is the timestamp the event is sent. It will be set automatically
+        as send time unless specified here.
+
+        *source* is information about where the event originated. It should be
+        a mapping and include at least a unique id value under an 'id' key. If
+        not specified, senders usually populate the value automatically at
+        publish time.
+
+        *target* can be an expression that targets this event. For example,
+        a reply event would target the event to the sender of the source event.
+        The expression will be tested against subscriber information only.
+
+        *in_reply_to_event* is used when replying to an event and should
+        contain the unique id of the event being replied to.
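+
+        For example, a minimal event might be constructed as follows (an
+        illustrative sketch only)::
+
+            event = Event('ftrack.update', data={'entities': []})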
+ + ''' + super(Event, self).__init__() + self._data = dict( + id=id or uuid.uuid4().hex, + data=data or {}, + topic=topic, + sent=sent, + source=source or {}, + target=target, + in_reply_to_event=in_reply_to_event + ) + self._stopped = False + + def stop(self): + '''Stop further processing of this event.''' + self._stopped = True + + def is_stopped(self): + '''Return whether event has been stopped.''' + return self._stopped + + def __str__(self): + '''Return string representation.''' + return '<{0} {1}>'.format( + self.__class__.__name__, str(self._data) + ) + + def __getitem__(self, key): + '''Return value for *key*.''' + return self._data[key] + + def __setitem__(self, key, value): + '''Set *value* for *key*.''' + self._data[key] = value + + def __delitem__(self, key): + '''Remove *key*.''' + del self._data[key] + + def __iter__(self): + '''Iterate over all keys.''' + return iter(self._data) + + def __len__(self): + '''Return count of keys.''' + return len(self._data) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/expression.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/expression.py new file mode 100644 index 0000000000..b8dae6cf5a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/expression.py @@ -0,0 +1,285 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from builtins import map +from six import string_types +from builtins import object +from operator import eq, ne, ge, le, gt, lt + +from pyparsing import (Group, Word, CaselessKeyword, Forward, + FollowedBy, Suppress, oneOf, OneOrMore, Optional, + alphanums, quotedString, removeQuotes) + +import ftrack_api.exception + +# Do not enable packrat since it is not thread-safe and will result in parsing +# exceptions in a multi threaded environment. +# ParserElement.enablePackrat() + + +class Parser(object): + '''Parse string based expression into :class:`Expression` instance.''' + + def __init__(self): + '''Initialise parser.''' + self._operators = { + '=': eq, + '!=': ne, + '>=': ge, + '<=': le, + '>': gt, + '<': lt + } + self._parser = self._construct_parser() + super(Parser, self).__init__() + + def _construct_parser(self): + '''Construct and return parser.''' + field = Word(alphanums + '_.') + operator = oneOf(list(self._operators.keys())) + value = Word(alphanums + '-_,./*@+') + quoted_value = quotedString('quoted_value').setParseAction(removeQuotes) + + condition = Group( + field + operator + (quoted_value | value) + )('condition') + + not_ = Optional(Suppress(CaselessKeyword('not')))('not') + and_ = Suppress(CaselessKeyword('and'))('and') + or_ = Suppress(CaselessKeyword('or'))('or') + + expression = Forward() + parenthesis = Suppress('(') + expression + Suppress(')') + previous = condition | parenthesis + + for conjunction in (not_, and_, or_): + current = Forward() + + if conjunction in (and_, or_): + conjunction_expression = ( + FollowedBy(previous + conjunction + previous) + + Group( + previous + OneOrMore(conjunction + previous) + )(conjunction.resultsName) + ) + + elif conjunction in (not_, ): + conjunction_expression = ( + FollowedBy(conjunction.expr + current) + + Group(conjunction + current)(conjunction.resultsName) + ) + + else: # pragma: no cover + raise ValueError('Unrecognised conjunction.') + + current <<= (conjunction_expression | previous) + previous = current + + expression <<= previous + return expression('expression') + + def parse(self, expression): + '''Parse string *expression* into :class:`Expression`. 
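+
+        For example, the following sketch (illustrative only) parses a
+        simple topic condition and matches it against a mapping::
+
+            expression = Parser().parse('topic=ftrack.update')
+            assert expression.match({'topic': 'ftrack.update'})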
+ + Raise :exc:`ftrack_api.exception.ParseError` if *expression* could + not be parsed. + + ''' + result = None + expression = expression.strip() + if expression: + try: + result = self._parser.parseString( + expression, parseAll=True + ) + except Exception as error: + raise ftrack_api.exception.ParseError( + 'Failed to parse: {0}. {1}'.format(expression, error) + ) + + return self._process(result) + + def _process(self, result): + '''Process *result* using appropriate method. + + Method called is determined by the name of the result. + + ''' + method_name = '_process_{0}'.format(result.getName()) + method = getattr(self, method_name) + return method(result) + + def _process_expression(self, result): + '''Process *result* as expression.''' + return self._process(result[0]) + + def _process_not(self, result): + '''Process *result* as NOT operation.''' + return Not(self._process(result[0])) + + def _process_and(self, result): + '''Process *result* as AND operation.''' + return All([self._process(entry) for entry in result]) + + def _process_or(self, result): + '''Process *result* as OR operation.''' + return Any([self._process(entry) for entry in result]) + + def _process_condition(self, result): + '''Process *result* as condition.''' + key, operator, value = result + return Condition(key, self._operators[operator], value) + + def _process_quoted_value(self, result): + '''Process *result* as quoted value.''' + return result + + +class Expression(object): + '''Represent a structured expression to test candidates against.''' + + def __str__(self): + '''Return string representation.''' + return '<{0}>'.format(self.__class__.__name__) + + def match(self, candidate): + '''Return whether *candidate* satisfies this expression.''' + return True + + +class All(Expression): + '''Match candidate that matches all of the specified expressions. + + .. note:: + + If no expressions are supplied then will always match. + + ''' + + def __init__(self, expressions=None): + '''Initialise with list of *expressions* to match against.''' + self._expressions = expressions or [] + super(All, self).__init__() + + def __str__(self): + '''Return string representation.''' + return '<{0} [{1}]>'.format( + self.__class__.__name__, + ' '.join(map(str, self._expressions)) + ) + + def match(self, candidate): + '''Return whether *candidate* satisfies this expression.''' + return all([ + expression.match(candidate) for expression in self._expressions + ]) + + +class Any(Expression): + '''Match candidate that matches any of the specified expressions. + + .. note:: + + If no expressions are supplied then will never match. 
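+
+    For example (an illustrative sketch, using the conditions defined
+    below)::
+
+        expression = Any([
+            Condition('topic', eq, 'ftrack.update'),
+            Condition('topic', eq, 'ftrack.action.launch')
+        ])
+        assert expression.match({'topic': 'ftrack.update'})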
+ + ''' + + def __init__(self, expressions=None): + '''Initialise with list of *expressions* to match against.''' + self._expressions = expressions or [] + super(Any, self).__init__() + + def __str__(self): + '''Return string representation.''' + return '<{0} [{1}]>'.format( + self.__class__.__name__, + ' '.join(map(str, self._expressions)) + ) + + def match(self, candidate): + '''Return whether *candidate* satisfies this expression.''' + return any([ + expression.match(candidate) for expression in self._expressions + ]) + + +class Not(Expression): + '''Negate expression.''' + + def __init__(self, expression): + '''Initialise with *expression* to negate.''' + self._expression = expression + super(Not, self).__init__() + + def __str__(self): + '''Return string representation.''' + return '<{0} {1}>'.format( + self.__class__.__name__, + self._expression + ) + + def match(self, candidate): + '''Return whether *candidate* satisfies this expression.''' + return not self._expression.match(candidate) + + +class Condition(Expression): + '''Represent condition.''' + + def __init__(self, key, operator, value): + '''Initialise condition. + + *key* is the key to check on the data when matching. It can be a nested + key represented by dots. For example, 'data.eventType' would attempt to + match candidate['data']['eventType']. If the candidate is missing any + of the requested keys then the match fails immediately. + + *operator* is the operator function to use to perform the match between + the retrieved candidate value and the conditional *value*. + + If *value* is a string, it can use a wildcard '*' at the end to denote + that any values matching the substring portion are valid when matching + equality only. + + ''' + self._key = key + self._operator = operator + self._value = value + self._wildcard = '*' + self._operatorMapping = { + eq: '=', + ne: '!=', + ge: '>=', + le: '<=', + gt: '>', + lt: '<' + } + + def __str__(self): + '''Return string representation.''' + return '<{0} {1}{2}{3}>'.format( + self.__class__.__name__, + self._key, + self._operatorMapping.get(self._operator, self._operator), + self._value + ) + + def match(self, candidate): + '''Return whether *candidate* satisfies this expression.''' + key_parts = self._key.split('.') + + try: + value = candidate + for keyPart in key_parts: + value = value[keyPart] + except (KeyError, TypeError): + return False + + if ( + self._operator is eq + and isinstance(self._value, string_types) + and self._value[-1] == self._wildcard + ): + return self._value[:-1] in value + else: + return self._operator(value, self._value) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/hub.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/hub.py new file mode 100644 index 0000000000..c95f04c7f9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/hub.py @@ -0,0 +1,1108 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2013 ftrack + +from __future__ import absolute_import + +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import range +from builtins import object +import collections +import urllib.parse +import threading +import Queue as queue +import logging +import time +import uuid +import operator +import functools +import json +import socket +import warnings +import ssl + +import requests +import requests.exceptions +import websocket + +import ftrack_api.exception +import ftrack_api.event.base +import ftrack_api.event.subscriber +import 
ftrack_api.event.expression +from ftrack_api.logging import LazyLogMessage as L + + +SocketIoSession = collections.namedtuple('SocketIoSession', [ + 'id', + 'heartbeatTimeout', + 'supportedTransports', +]) + + +ServerDetails = collections.namedtuple('ServerDetails', [ + 'scheme', + 'hostname', + 'port', +]) + + +class EventHub(object): + '''Manage routing of events.''' + + def __init__(self, server_url, api_user, api_key): + '''Initialise hub, connecting to ftrack *server_url*. + + *api_user* is the user to authenticate as and *api_key* is the API key + to authenticate with. + + ''' + super(EventHub, self).__init__() + self.logger = logging.getLogger( + __name__ + '.' + self.__class__.__name__ + ) + self.id = uuid.uuid4().hex + self._connection = None + + self._unique_packet_id = 0 + self._packet_callbacks = {} + self._lock = threading.RLock() + + self._wait_timeout = 4 + + self._subscribers = [] + self._reply_callbacks = {} + self._intentional_disconnect = False + + self._event_queue = queue.Queue() + self._event_namespace = 'ftrack.event' + self._expression_parser = ftrack_api.event.expression.Parser() + + # Track if a connection has been initialised. + self._connection_initialised = False + + # Default values for auto reconnection timeout on unintentional + # disconnection. Equates to 5 minutes. + self._auto_reconnect_attempts = 30 + self._auto_reconnect_delay = 10 + + # Mapping of Socket.IO codes to meaning. + self._code_name_mapping = { + '0': 'disconnect', + '1': 'connect', + '2': 'heartbeat', + '3': 'message', + '4': 'json', + '5': 'event', + '6': 'acknowledge', + '7': 'error' + } + self._code_name_mapping.update( + dict((name, code) for code, name in list(self._code_name_mapping.items())) + ) + + self._server_url = server_url + self._api_user = api_user + self._api_key = api_key + + # Parse server URL and store server details. + url_parse_result = urllib.parse.urlparse(self._server_url) + if not url_parse_result.scheme: + raise ValueError('Could not determine scheme from server url.') + + if not url_parse_result.hostname: + raise ValueError('Could not determine hostname from server url.') + + self.server = ServerDetails( + url_parse_result.scheme, + url_parse_result.hostname, + url_parse_result.port + ) + + def get_server_url(self): + '''Return URL to server.''' + return '{0}://{1}'.format( + self.server.scheme, self.get_network_location() + ) + + def get_network_location(self): + '''Return network location part of url (hostname with optional port).''' + if self.server.port: + return '{0}:{1}'.format(self.server.hostname, self.server.port) + else: + return self.server.hostname + + @property + def secure(self): + '''Return whether secure connection used.''' + return self.server.scheme == 'https' + + def connect(self): + '''Initialise connection to server. + + Raise :exc:`ftrack_api.exception.EventHubConnectionError` if already + connected or connection fails. + + ''' + # Update tracking flag for connection. + self._connection_initialised = True + + if self.connected: + raise ftrack_api.exception.EventHubConnectionError( + 'Already connected.' + ) + + # Reset flag tracking whether disconnection was intentional. + self._intentional_disconnect = False + + try: + # Connect to socket.io server using websocket transport. + session = self._get_socket_io_session() + + if 'websocket' not in session.supportedTransports: + raise ValueError( + 'Server does not support websocket sessions.' 
+ ) + + scheme = 'wss' if self.secure else 'ws' + url = '{0}://{1}/socket.io/1/websocket/{2}'.format( + scheme, self.get_network_location(), session.id + ) + + # Select highest available protocol for websocket connection. + ssl_protocols = [ + 'PROTOCOL_TLS', + 'PROTOCOL_TLSv1_2' + ] + + available_ssl_protocol = None + + for ssl_protocol in ssl_protocols: + if hasattr(ssl, ssl_protocol): + available_ssl_protocol = getattr(ssl, ssl_protocol) + self.logger.debug( + 'Using protocol {} to connect to websocket.'.format( + ssl_protocol + )) + break + + # timeout is set to 60 seconds to avoid the issue where the socket + # ends up in a bad state where it is reported as connected but the + # connection has been closed. The issue happens often when connected + # to a secure socket and the computer goes to sleep. + # More information on how the timeout works can be found here: + # https://docs.python.org/2/library/socket.html#socket.socket.setblocking + self._connection = websocket.create_connection( + url, timeout=60, sslopt={"ssl_version": available_ssl_protocol} + ) + + except Exception as error: + error_message = ( + 'Failed to connect to event server at {server_url} with ' + 'error: "{error}".' + ) + + error_details = { + 'error': str(error), + 'server_url': self.get_server_url() + } + + self.logger.debug( + L( + error_message, **error_details + ), + exc_info=1 + ) + raise ftrack_api.exception.EventHubConnectionError( + error_message, + details=error_details + ) + + # Start background processing thread. + self._processor_thread = _ProcessorThread(self) + self._processor_thread.start() + + # Subscribe to reply events if not already. Note: Only adding the + # subscriber locally as the following block will notify server of all + # existing subscribers, which would cause the server to report a + # duplicate subscriber error if EventHub.subscribe was called here. + try: + self._add_subscriber( + 'topic=ftrack.meta.reply', + self._handle_reply, + subscriber=dict( + id=self.id + ) + ) + except ftrack_api.exception.NotUniqueError: + pass + + # Now resubscribe any existing stored subscribers. This can happen when + # reconnecting automatically for example. + for subscriber in self._subscribers[:]: + self._notify_server_about_subscriber(subscriber) + + @property + def connected(self): + '''Return if connected.''' + return self._connection is not None and self._connection.connected + + def disconnect(self, unsubscribe=True): + '''Disconnect from server. + + Raise :exc:`ftrack_api.exception.EventHubConnectionError` if not + currently connected. + + If *unsubscribe* is True then unsubscribe all current subscribers + automatically before disconnecting. + + ''' + if not self.connected: + raise ftrack_api.exception.EventHubConnectionError( + 'Not currently connected.' + ) + + else: + # Set flag to indicate disconnection was intentional. + self._intentional_disconnect = True + + # Set blocking to true on socket to make sure unsubscribe events + # are emitted before closing the connection. + self._connection.sock.setblocking(1) + + # Unsubscribe all subscribers. + if unsubscribe: + for subscriber in self._subscribers[:]: + self.unsubscribe(subscriber.metadata['id']) + + # Now disconnect. + self._connection.close() + self._connection = None + + # Shutdown background processing thread. + self._processor_thread.cancel() + + # Join to it if it is not current thread to help ensure a clean + # shutdown. 
+            if threading.current_thread() != self._processor_thread:
+                self._processor_thread.join(self._wait_timeout)
+
+    def reconnect(self, attempts=10, delay=5):
+        '''Reconnect to server.
+
+        Make *attempts* number of attempts with *delay* in seconds between each
+        attempt.
+
+        .. note::
+
+            All current subscribers will be automatically resubscribed after
+            successful reconnection.
+
+        Raise :exc:`ftrack_api.exception.EventHubConnectionError` if fail to
+        reconnect.
+
+        '''
+        try:
+            self.disconnect(unsubscribe=False)
+        except ftrack_api.exception.EventHubConnectionError:
+            pass
+
+        for attempt in range(attempts):
+            self.logger.debug(L(
+                'Reconnect attempt {0} of {1}', attempt, attempts
+            ))
+
+            # Silence logging temporarily to avoid lots of failed connection
+            # related information.
+            try:
+                logging.disable(logging.CRITICAL)
+
+                try:
+                    self.connect()
+                except ftrack_api.exception.EventHubConnectionError:
+                    time.sleep(delay)
+                else:
+                    break
+
+            finally:
+                logging.disable(logging.NOTSET)
+
+        if not self.connected:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Failed to reconnect to event server at {0} after {1} attempts.'
+                .format(self.get_server_url(), attempts)
+            )
+
+    def wait(self, duration=None):
+        '''Wait for events and handle as they arrive.
+
+        If *duration* is specified, then only process events until duration is
+        reached. *duration* is in seconds though float values can be used for
+        smaller values.
+
+        '''
+
+        if not self._connection_initialised:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Event hub does not have a connection to the event server and '
+                'will therefore only be able to receive synchronous events. '
+                'Please see http://ftrack-python-api.rtd.ftrack.com/en/stable/'
+                'release/migration.html#default-behavior-for-connecting-to-event-hub'
+                ' for further information.'
+            )
+
+        started = time.time()
+
+        while True:
+            try:
+                event = self._event_queue.get(timeout=0.1)
+            except queue.Empty:
+                pass
+            else:
+                self._handle(event)
+
+                # Additional special processing of events.
+                if event['topic'] == 'ftrack.meta.disconnected':
+                    break
+
+            if duration is not None:
+                if (time.time() - started) > duration:
+                    break
+
+    def get_subscriber_by_identifier(self, identifier):
+        '''Return subscriber with matching *identifier*.
+
+        Return None if no subscriber with *identifier* found.
+
+        '''
+        for subscriber in self._subscribers[:]:
+            if subscriber.metadata.get('id') == identifier:
+                return subscriber
+
+        return None
+
+    def subscribe(self, subscription, callback, subscriber=None, priority=100):
+        '''Register *callback* for *subscription*.
+
+        A *subscription* is a string that can specify in detail which events the
+        callback should receive. The filtering is applied against each event
+        object. Nested references are supported using '.' separators.
+        For example, 'topic=foo and data.eventType=Shot' would match the
+        following event::
+
+            <Event {'topic': 'foo', 'data': {'eventType': 'Shot'}}>
+
+        The *callback* should accept an instance of
+        :class:`ftrack_api.event.base.Event` as its sole argument.
+
+        Callbacks are called in order of *priority*. The lower the priority
+        number the sooner it will be called, with 0 being the first. The
+        default priority is 100. Note that priority only applies against other
+        callbacks registered with this hub and not as a global priority.
+
+        An earlier callback can prevent processing of subsequent callbacks by
+        calling :meth:`Event.stop` on the passed `event` before
+        returning.
+
+        .. warning::
+
+            Handlers block processing of other received events.
For long + running callbacks it is advisable to delegate the main work to + another process or thread. + + A *callback* can be attached to *subscriber* information that details + the subscriber context. A subscriber context will be generated + automatically if not supplied. + + .. note:: + + The subscription will be stored locally, but until the server + receives notification of the subscription it is possible the + callback will not be called. + + Return subscriber identifier. + + Raise :exc:`ftrack_api.exception.NotUniqueError` if a subscriber with + the same identifier already exists. + + ''' + # Add subscriber locally. + subscriber = self._add_subscriber( + subscription, callback, subscriber, priority + ) + + # Notify server now if possible. + try: + self._notify_server_about_subscriber(subscriber) + except ftrack_api.exception.EventHubConnectionError: + self.logger.debug(L( + 'Failed to notify server about new subscriber {0} ' + 'as server not currently reachable.', subscriber.metadata['id'] + )) + + return subscriber.metadata['id'] + + def _add_subscriber( + self, subscription, callback, subscriber=None, priority=100 + ): + '''Add subscriber locally. + + See :meth:`subscribe` for argument descriptions. + + Return :class:`ftrack_api.event.subscriber.Subscriber` instance. + + Raise :exc:`ftrack_api.exception.NotUniqueError` if a subscriber with + the same identifier already exists. + + ''' + if subscriber is None: + subscriber = {} + + subscriber.setdefault('id', uuid.uuid4().hex) + + # Check subscriber not already subscribed. + existing_subscriber = self.get_subscriber_by_identifier( + subscriber['id'] + ) + + if existing_subscriber is not None: + raise ftrack_api.exception.NotUniqueError( + 'Subscriber with identifier {0} already exists.' + .format(subscriber['id']) + ) + + subscriber = ftrack_api.event.subscriber.Subscriber( + subscription=subscription, + callback=callback, + metadata=subscriber, + priority=priority + ) + + self._subscribers.append(subscriber) + + return subscriber + + def _notify_server_about_subscriber(self, subscriber): + '''Notify server of new *subscriber*.''' + subscribe_event = ftrack_api.event.base.Event( + topic='ftrack.meta.subscribe', + data=dict( + subscriber=subscriber.metadata, + subscription=str(subscriber.subscription) + ) + ) + + self._publish( + subscribe_event, + callback=functools.partial(self._on_subscribed, subscriber) + ) + + def _on_subscribed(self, subscriber, response): + '''Handle acknowledgement of subscription.''' + if response.get('success') is False: + self.logger.warning(L( + 'Server failed to subscribe subscriber {0}: {1}', + subscriber.metadata['id'], response.get('message') + )) + + def unsubscribe(self, subscriber_identifier): + '''Unsubscribe subscriber with *subscriber_identifier*. + + .. note:: + + If the server is not reachable then it won't be notified of the + unsubscription. However, the subscriber will be removed locally + regardless. + + ''' + subscriber = self.get_subscriber_by_identifier(subscriber_identifier) + + if subscriber is None: + raise ftrack_api.exception.NotFoundError( + 'Cannot unsubscribe missing subscriber with identifier {0}' + .format(subscriber_identifier) + ) + + self._subscribers.pop(self._subscribers.index(subscriber)) + + # Notify the server if possible. 
+ unsubscribe_event = ftrack_api.event.base.Event( + topic='ftrack.meta.unsubscribe', + data=dict(subscriber=subscriber.metadata) + ) + + try: + self._publish( + unsubscribe_event, + callback=functools.partial(self._on_unsubscribed, subscriber) + ) + except ftrack_api.exception.EventHubConnectionError: + self.logger.debug(L( + 'Failed to notify server to unsubscribe subscriber {0} as ' + 'server not currently reachable.', subscriber.metadata['id'] + )) + + def _on_unsubscribed(self, subscriber, response): + '''Handle acknowledgement of unsubscribing *subscriber*.''' + if response.get('success') is not True: + self.logger.warning(L( + 'Server failed to unsubscribe subscriber {0}: {1}', + subscriber.metadata['id'], response.get('message') + )) + + def _prepare_event(self, event): + '''Prepare *event* for sending.''' + event['source'].setdefault('id', self.id) + event['source'].setdefault('user', { + 'username': self._api_user + }) + + def _prepare_reply_event(self, event, source_event, source=None): + '''Prepare *event* as a reply to another *source_event*. + + Modify *event*, setting appropriate values to target event correctly as + a reply. + + ''' + event['target'] = 'id={0}'.format(source_event['source']['id']) + event['in_reply_to_event'] = source_event['id'] + if source is not None: + event['source'] = source + + def publish( + self, event, synchronous=False, on_reply=None, on_error='raise' + ): + '''Publish *event*. + + If *synchronous* is specified as True then this method will wait and + return a list of results from any called callbacks. + + .. note:: + + Currently, if synchronous is True then only locally registered + callbacks will be called and no event will be sent to the server. + This may change in future. + + *on_reply* is an optional callable to call with any reply event that is + received in response to the published *event*. + + .. note:: + + Will not be called when *synchronous* is True. + + If *on_error* is set to 'ignore' then errors raised during publish of + event will be caught by this method and ignored. + + ''' + + try: + return self._publish( + event, synchronous=synchronous, on_reply=on_reply + ) + except Exception: + if on_error == 'ignore': + pass + else: + raise + + def publish_reply(self, source_event, data, source=None): + '''Publish a reply event to *source_event* with supplied *data*. + + If *source* is specified it will be used for the source value of the + sent event. + + ''' + reply_event = ftrack_api.event.base.Event( + 'ftrack.meta.reply', + data=data + ) + self._prepare_reply_event(reply_event, source_event, source=source) + self.publish(reply_event) + + def _publish(self, event, synchronous=False, callback=None, on_reply=None): + '''Publish *event*. + + If *synchronous* is specified as True then this method will wait and + return a list of results from any called callbacks. + + .. note:: + + Currently, if synchronous is True then only locally registered + callbacks will be called and no event will be sent to the server. + This may change in future. + + A *callback* can also be specified. This callback will be called once + the server acknowledges receipt of the sent event. A default callback + that checks for errors from the server will be used if not specified. + + *on_reply* is an optional callable to call with any reply event that is + received in response to the published *event*. Note that there is no + guarantee that a reply will be sent. + + Raise :exc:`ftrack_api.exception.EventHubConnectionError` if not + currently connected. 
+ + ''' + # Prepare event adding any relevant additional information. + self._prepare_event(event) + + if synchronous: + # Bypass emitting event to server and instead call locally + # registered handlers directly, collecting and returning results. + return self._handle(event, synchronous=synchronous) + + if not self.connected: + raise ftrack_api.exception.EventHubConnectionError( + 'Cannot publish event asynchronously as not connected to ' + 'server.' + ) + + # Use standard callback if none specified. + if callback is None: + callback = functools.partial(self._on_published, event) + + # Emit event to central server for asynchronous processing. + try: + # Register on reply callback if specified. + if on_reply is not None: + # TODO: Add cleanup process that runs after a set duration to + # garbage collect old reply callbacks and prevent dictionary + # growing too large. + self._reply_callbacks[event['id']] = on_reply + + try: + self._emit_event_packet( + self._event_namespace, event, callback=callback + ) + except ftrack_api.exception.EventHubConnectionError: + # Connection may have dropped temporarily. Wait a few moments to + # see if background thread reconnects automatically. + time.sleep(15) + + self._emit_event_packet( + self._event_namespace, event, callback=callback + ) + except: + raise + + except Exception: + # Failure to send event should not cause caller to fail. + # TODO: This behaviour is inconsistent with the failing earlier on + # lack of connection and also with the error handling parameter of + # EventHub.publish. Consider refactoring. + self.logger.exception(L('Error sending event {0}.', event)) + + def _on_published(self, event, response): + '''Handle acknowledgement of published event.''' + if response.get('success', False) is False: + self.logger.error(L( + 'Server responded with error while publishing event {0}. ' + 'Error was: {1}', event, response.get('message') + )) + + def _handle(self, event, synchronous=False): + '''Handle *event*. + + If *synchronous* is True, do not send any automatic reply events. + + ''' + # Sort by priority, lower is higher. + # TODO: Use a sorted list to avoid sorting each time in order to improve + # performance. + subscribers = sorted( + self._subscribers, key=operator.attrgetter('priority') + ) + + results = [] + + target = event.get('target', None) + target_expression = None + if target: + try: + target_expression = self._expression_parser.parse(target) + except Exception: + self.logger.exception(L( + 'Cannot handle event as failed to parse event target ' + 'information: {0}', event + )) + return + + for subscriber in subscribers: + # Check if event is targeted to the subscriber. + if ( + target_expression is not None + and not target_expression.match(subscriber.metadata) + ): + continue + + # Check if subscriber interested in the event. + if not subscriber.interested_in(event): + continue + + response = None + + try: + response = subscriber.callback(event) + results.append(response) + except Exception: + self.logger.exception(L( + 'Error calling subscriber {0} for event {1}.', + subscriber, event + )) + + # Automatically publish a non None response as a reply when not in + # synchronous mode. + if not synchronous: + + if response is not None: + try: + self.publish_reply( + event, data=response, source=subscriber.metadata + ) + + except Exception: + self.logger.exception(L( + 'Error publishing response {0} from subscriber {1} ' + 'for event {2}.', response, subscriber, event + )) + + # Check whether to continue processing topic event. 
+            if event.is_stopped():
+                self.logger.debug(L(
+                    'Subscriber {0} stopped event {1}. Will not process '
+                    'subsequent subscriber callbacks for this event.',
+                    subscriber, event
+                ))
+                break
+
+        return results
+
+    def _handle_reply(self, event):
+        '''Handle reply *event*, passing it to any registered callback.'''
+        callback = self._reply_callbacks.get(event['in_reply_to_event'], None)
+        if callback is not None:
+            callback(event)
+
+    def subscription(self, subscription, callback, subscriber=None,
+                     priority=100):
+        '''Return context manager with *callback* subscribed to *subscription*.
+
+        The subscribed callback will be automatically unsubscribed on exit
+        of the context manager.
+
+        '''
+        return _SubscriptionContext(
+            self, subscription, callback, subscriber=subscriber,
+            priority=priority,
+        )
+
+    # Socket.IO interface.
+    #
+
+    def _get_socket_io_session(self):
+        '''Connect to server and retrieve session information.'''
+        socket_io_url = (
+            '{0}://{1}/socket.io/1/?api_user={2}&api_key={3}'
+        ).format(
+            self.server.scheme,
+            self.get_network_location(),
+            self._api_user,
+            self._api_key
+        )
+        try:
+            response = requests.get(
+                socket_io_url,
+                timeout=60  # 60 seconds timeout to receive errors faster.
+            )
+        except requests.exceptions.Timeout as error:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Timed out connecting to server: {0}.'.format(error)
+            )
+        except requests.exceptions.SSLError as error:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Failed to negotiate SSL with server: {0}.'.format(error)
+            )
+        except requests.exceptions.ConnectionError as error:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Failed to connect to server: {0}.'.format(error)
+            )
+        else:
+            status = response.status_code
+            if status != 200:
+                raise ftrack_api.exception.EventHubConnectionError(
+                    'Received unexpected status code {0}.'.format(status)
+                )
+
+            # Parse result and return session information.
+            parts = response.text.split(':')
+            return SocketIoSession(
+                parts[0],
+                parts[1],
+                parts[3].split(',')
+            )
+
+    def _add_packet_callback(self, callback):
+        '''Store callback against a new unique packet ID.
+
+        Return the unique packet ID.
+
+        '''
+        with self._lock:
+            self._unique_packet_id += 1
+            unique_identifier = self._unique_packet_id
+
+        self._packet_callbacks[unique_identifier] = callback
+
+        return '{0}+'.format(unique_identifier)
+
+    def _pop_packet_callback(self, packet_identifier):
+        '''Pop and return callback for *packet_identifier*.'''
+        return self._packet_callbacks.pop(packet_identifier)
+
+    def _emit_event_packet(self, namespace, event, callback):
+        '''Send *event* packet under *namespace*.'''
+        data = self._encode(
+            dict(name=namespace, args=[event])
+        )
+        self._send_packet(
+            self._code_name_mapping['event'], data=data, callback=callback
+        )
+
+    def _acknowledge_packet(self, packet_identifier, *args):
+        '''Send acknowledgement of packet with *packet_identifier*.'''
+        packet_identifier = packet_identifier.rstrip('+')
+        data = str(packet_identifier)
+        if args:
+            data += '+{0}'.format(self._encode(args))
+
+        self._send_packet(self._code_name_mapping['acknowledge'], data=data)
+
+    def _send_packet(self, code, data='', callback=None):
+        '''Send packet via connection.'''
+        path = ''
+        packet_identifier = (
+            self._add_packet_callback(callback) if callback else ''
+        )
+        packet_parts = (str(code), packet_identifier, path, data)
+        packet = ':'.join(packet_parts)
+
+        try:
+            self._connection.send(packet)
+            self.logger.debug(L(u'Sent packet: {0}', packet))
+        except socket.error as error:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Failed to send packet: {0}'.format(error)
+            )
+
+    def _receive_packet(self):
+        '''Receive and return packet via connection.'''
+        try:
+            packet = self._connection.recv()
+        except Exception as error:
+            raise ftrack_api.exception.EventHubConnectionError(
+                'Error receiving packet: {0}'.format(error)
+            )
+
+        try:
+            parts = packet.split(':', 3)
+        except AttributeError:
+            raise ftrack_api.exception.EventHubPacketError(
+                'Received invalid packet {0}'.format(packet)
+            )
+
+        code, packet_identifier, path, data = None, None, None, None
+
+        count = len(parts)
+        if count == 4:
+            code, packet_identifier, path, data = parts
+        elif count == 3:
+            code, packet_identifier, path = parts
+        elif count == 1:
+            code = parts[0]
+        else:
+            raise ftrack_api.exception.EventHubPacketError(
+                'Received invalid packet {0}'.format(packet)
+            )
+
+        self.logger.debug(L('Received packet: {0}', packet))
+        return code, packet_identifier, path, data
+
+    def _handle_packet(self, code, packet_identifier, path, data):
+        '''Handle packet received from server.'''
+        code_name = self._code_name_mapping[code]
+
+        if code_name == 'connect':
+            self.logger.debug('Connected to event server.')
+            event = ftrack_api.event.base.Event('ftrack.meta.connected')
+            self._prepare_event(event)
+            self._event_queue.put(event)
+
+        elif code_name == 'disconnect':
+            self.logger.debug('Disconnected from event server.')
+            if not self._intentional_disconnect:
+                self.logger.debug(
+                    'Disconnected unexpectedly. Attempting to reconnect.'
+                )
+                try:
+                    self.reconnect(
+                        attempts=self._auto_reconnect_attempts,
+                        delay=self._auto_reconnect_delay
+                    )
+                except ftrack_api.exception.EventHubConnectionError:
+                    self.logger.debug('Failed to reconnect automatically.')
+                else:
+                    self.logger.debug('Reconnected successfully.')
+
+            if not self.connected:
+                event = ftrack_api.event.base.Event('ftrack.meta.disconnected')
+                self._prepare_event(event)
+                self._event_queue.put(event)
+
+        elif code_name == 'heartbeat':
+            # Reply with heartbeat.
+ self._send_packet(self._code_name_mapping['heartbeat']) + + elif code_name == 'message': + self.logger.debug(L('Message received: {0}', data)) + + elif code_name == 'event': + payload = self._decode(data) + args = payload.get('args', []) + + if len(args) == 1: + event_payload = args[0] + if isinstance(event_payload, collections.Mapping): + try: + event = ftrack_api.event.base.Event(**event_payload) + except Exception: + self.logger.exception(L( + 'Failed to convert payload into event: {0}', + event_payload + )) + return + + self._event_queue.put(event) + + elif code_name == 'acknowledge': + parts = data.split('+', 1) + acknowledged_packet_identifier = int(parts[0]) + args = [] + if len(parts) == 2: + args = self._decode(parts[1]) + + try: + callback = self._pop_packet_callback( + acknowledged_packet_identifier + ) + except KeyError: + pass + else: + callback(*args) + + elif code_name == 'error': + self.logger.error(L('Event server reported error: {0}.', data)) + + else: + self.logger.debug(L('{0}: {1}', code_name, data)) + + def _encode(self, data): + '''Return *data* encoded as JSON formatted string.''' + return json.dumps( + data, + default=self._encode_object_hook, + ensure_ascii=False + ) + + def _encode_object_hook(self, item): + '''Return *item* transformed for encoding.''' + if isinstance(item, ftrack_api.event.base.Event): + # Convert to dictionary for encoding. + item = dict(**item) + + if 'in_reply_to_event' in item: + # Convert keys to server convention. + item['inReplyToEvent'] = item.pop('in_reply_to_event') + + return item + + raise TypeError('{0!r} is not JSON serializable'.format(item)) + + def _decode(self, string): + '''Return decoded JSON *string* as Python object.''' + return json.loads(string, object_hook=self._decode_object_hook) + + def _decode_object_hook(self, item): + '''Return *item* transformed.''' + if isinstance(item, collections.Mapping): + if 'inReplyToEvent' in item: + item['in_reply_to_event'] = item.pop('inReplyToEvent') + + return item + + +class _SubscriptionContext(object): + '''Context manager for a one-off subscription.''' + + def __init__(self, hub, subscription, callback, subscriber, priority): + '''Initialise context.''' + self._hub = hub + self._subscription = subscription + self._callback = callback + self._subscriber = subscriber + self._priority = priority + self._subscriberIdentifier = None + + def __enter__(self): + '''Enter context subscribing callback to topic.''' + self._subscriberIdentifier = self._hub.subscribe( + self._subscription, self._callback, subscriber=self._subscriber, + priority=self._priority + ) + + def __exit__(self, exception_type, exception_value, traceback): + '''Exit context unsubscribing callback from topic.''' + self._hub.unsubscribe(self._subscriberIdentifier) + + +class _ProcessorThread(threading.Thread): + '''Process messages from server.''' + + daemon = True + + def __init__(self, client): + '''Initialise thread with Socket.IO *client* instance.''' + super(_ProcessorThread, self).__init__() + self.logger = logging.getLogger( + __name__ + '.' 
+ self.__class__.__name__ + ) + self.client = client + self.done = threading.Event() + + def run(self): + '''Perform work in thread.''' + while not self.done.is_set(): + try: + code, packet_identifier, path, data = self.client._receive_packet() + self.client._handle_packet(code, packet_identifier, path, data) + + except ftrack_api.exception.EventHubPacketError as error: + self.logger.debug(L('Ignoring invalid packet: {0}', error)) + continue + + except ftrack_api.exception.EventHubConnectionError: + self.cancel() + + # Fake a disconnection event in order to trigger reconnection + # when necessary. + self.client._handle_packet('0', '', '', '') + + break + + except Exception as error: + self.logger.debug(L('Aborting processor thread: {0}', error)) + self.cancel() + break + + def cancel(self): + '''Cancel work as soon as possible.''' + self.done.set() diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscriber.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscriber.py new file mode 100644 index 0000000000..2afdaa7877 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscriber.py @@ -0,0 +1,28 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from builtins import object +import ftrack_api.event.subscription + + +class Subscriber(object): + '''Represent event subscriber.''' + + def __init__(self, subscription, callback, metadata, priority): + '''Initialise subscriber.''' + self.subscription = ftrack_api.event.subscription.Subscription( + subscription + ) + self.callback = callback + self.metadata = metadata + self.priority = priority + + def __str__(self): + '''Return string representation.''' + return '<{0} metadata={1} subscription="{2}">'.format( + self.__class__.__name__, self.metadata, self.subscription + ) + + def interested_in(self, event): + '''Return whether subscriber interested in *event*.''' + return self.subscription.includes(event) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscription.py b/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscription.py new file mode 100644 index 0000000000..f3a839f586 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/event/subscription.py @@ -0,0 +1,24 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from builtins import object +import ftrack_api.event.expression + + +class Subscription(object): + '''Represent a subscription.''' + + parser = ftrack_api.event.expression.Parser() + + def __init__(self, subscription): + '''Initialise with *subscription*.''' + self._subscription = subscription + self._expression = self.parser.parse(subscription) + + def __str__(self): + '''Return string representation.''' + return self._subscription + + def includes(self, event): + '''Return whether subscription includes *event*.''' + return self._expression.match(event) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/exception.py b/pype/modules/ftrack/python2_vendor/ftrack_api/exception.py new file mode 100644 index 0000000000..77bdf5b1ae --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/exception.py @@ -0,0 +1,393 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from builtins import str +import sys +import traceback + +import ftrack_api.entity.base + + +class Error(Exception): + '''ftrack specific error.''' + + default_message = 'Unspecified error occurred.' + + def __init__(self, message=None, details=None): + '''Initialise exception with *message*. 
+ + If *message* is None, the class 'default_message' will be used. + + *details* should be a mapping of extra information that can be used in + the message and also to provide more context. + + ''' + if message is None: + message = self.default_message + + self.message = message + self.details = details + if self.details is None: + self.details = {} + + self.traceback = traceback.format_exc() + + def __str__(self): + '''Return string representation.''' + keys = {} + for key, value in self.details.items(): + if isinstance(value, str): + value = value.encode(sys.getfilesystemencoding()) + keys[key] = value + + return str(self.message.format(**keys)) + + +class AuthenticationError(Error): + '''Raise when an authentication error occurs.''' + + default_message = 'Authentication error.' + + +class ServerError(Error): + '''Raise when the server reports an error.''' + + default_message = 'Server reported error processing request.' + + +class ServerCompatibilityError(ServerError): + '''Raise when server appears incompatible.''' + + default_message = 'Server incompatible.' + + +class NotFoundError(Error): + '''Raise when something that should exist is not found.''' + + default_message = 'Not found.' + + +class NotUniqueError(Error): + '''Raise when unique value required and duplicate detected.''' + + default_message = 'Non-unique value detected.' + + +class IncorrectResultError(Error): + '''Raise when a result is incorrect.''' + + default_message = 'Incorrect result detected.' + + +class NoResultFoundError(IncorrectResultError): + '''Raise when a result was expected but no result was found.''' + + default_message = 'Expected result, but no result was found.' + + +class MultipleResultsFoundError(IncorrectResultError): + '''Raise when a single result expected, but multiple results found.''' + + default_message = 'Expected single result, but received multiple results.' + + +class EntityTypeError(Error): + '''Raise when an entity type error occurs.''' + + default_message = 'Entity type error.' + + +class UnrecognisedEntityTypeError(EntityTypeError): + '''Raise when an unrecognised entity type detected.''' + + default_message = 'Entity type "{entity_type}" not recognised.' + + def __init__(self, entity_type, **kw): + '''Initialise with *entity_type* that is unrecognised.''' + kw.setdefault('details', {}).update(dict( + entity_type=entity_type + )) + super(UnrecognisedEntityTypeError, self).__init__(**kw) + + +class OperationError(Error): + '''Raise when an operation error occurs.''' + + default_message = 'Operation error.' + + +class InvalidStateError(Error): + '''Raise when an invalid state detected.''' + + default_message = 'Invalid state.' + + +class InvalidStateTransitionError(InvalidStateError): + '''Raise when an invalid state transition detected.''' + + default_message = ( + 'Invalid transition from {current_state!r} to {target_state!r} state ' + 'for entity {entity!r}' + ) + + def __init__(self, current_state, target_state, entity, **kw): + '''Initialise error.''' + kw.setdefault('details', {}).update(dict( + current_state=current_state, + target_state=target_state, + entity=entity + )) + super(InvalidStateTransitionError, self).__init__(**kw) + + +class AttributeError(Error): + '''Raise when an error related to an attribute occurs.''' + + default_message = 'Attribute error.' + + +class ImmutableAttributeError(AttributeError): + '''Raise when modification of immutable attribute attempted.''' + + default_message = ( + 'Cannot modify value of immutable {attribute.name!r} attribute.' 
+ ) + + def __init__(self, attribute, **kw): + '''Initialise error.''' + kw.setdefault('details', {}).update(dict( + attribute=attribute + )) + super(ImmutableAttributeError, self).__init__(**kw) + + +class CollectionError(Error): + '''Raise when an error related to collections occurs.''' + + default_message = 'Collection error.' + + def __init__(self, collection, **kw): + '''Initialise error.''' + kw.setdefault('details', {}).update(dict( + collection=collection + )) + super(CollectionError, self).__init__(**kw) + + +class ImmutableCollectionError(CollectionError): + '''Raise when modification of immutable collection attempted.''' + + default_message = ( + 'Cannot modify value of immutable collection {collection!r}.' + ) + + +class DuplicateItemInCollectionError(CollectionError): + '''Raise when duplicate item in collection detected.''' + + default_message = ( + 'Item {item!r} already exists in collection {collection!r}.' + ) + + def __init__(self, item, collection, **kw): + '''Initialise error.''' + kw.setdefault('details', {}).update(dict( + item=item + )) + super(DuplicateItemInCollectionError, self).__init__(collection, **kw) + + +class ParseError(Error): + '''Raise when a parsing error occurs.''' + + default_message = 'Failed to parse.' + + +class EventHubError(Error): + '''Raise when issues related to event hub occur.''' + + default_message = 'Event hub error occurred.' + + +class EventHubConnectionError(EventHubError): + '''Raise when event hub encounters connection problem.''' + + default_message = 'Event hub is not connected.' + + +class EventHubPacketError(EventHubError): + '''Raise when event hub encounters an issue with a packet.''' + + default_message = 'Invalid packet.' + + +class PermissionDeniedError(Error): + '''Raise when permission is denied.''' + + default_message = 'Permission denied.' + + +class LocationError(Error): + '''Base for errors associated with locations.''' + + default_message = 'Unspecified location error' + + +class ComponentNotInAnyLocationError(LocationError): + '''Raise when component not available in any location.''' + + default_message = 'Component not available in any location.' + + +class ComponentNotInLocationError(LocationError): + '''Raise when component(s) not in location.''' + + default_message = ( + 'Component(s) {formatted_components} not found in location {location}.' + ) + + def __init__(self, components, location, **kw): + '''Initialise with *components* and *location*.''' + if isinstance(components, ftrack_api.entity.base.Entity): + components = [components] + + kw.setdefault('details', {}).update(dict( + components=components, + formatted_components=', '.join( + [str(component) for component in components] + ), + location=location + )) + + super(ComponentNotInLocationError, self).__init__(**kw) + + +class ComponentInLocationError(LocationError): + '''Raise when component(s) already exists in location.''' + + default_message = ( + 'Component(s) {formatted_components} already exist in location ' + '{location}.' 
+ ) + + def __init__(self, components, location, **kw): + '''Initialise with *components* and *location*.''' + if isinstance(components, ftrack_api.entity.base.Entity): + components = [components] + + kw.setdefault('details', {}).update(dict( + components=components, + formatted_components=', '.join( + [str(component) for component in components] + ), + location=location + )) + + super(ComponentInLocationError, self).__init__(**kw) + + +class AccessorError(Error): + '''Base for errors associated with accessors.''' + + default_message = 'Unspecified accessor error' + + +class AccessorOperationFailedError(AccessorError): + '''Base for failed operations on accessors.''' + + default_message = 'Operation {operation} failed: {error}' + + def __init__( + self, operation='', resource_identifier=None, error=None, **kw + ): + kw.setdefault('details', {}).update(dict( + operation=operation, + resource_identifier=resource_identifier, + error=error + )) + super(AccessorOperationFailedError, self).__init__(**kw) + + +class AccessorUnsupportedOperationError(AccessorOperationFailedError): + '''Raise when operation is unsupported.''' + + default_message = 'Operation {operation} unsupported.' + + +class AccessorPermissionDeniedError(AccessorOperationFailedError): + '''Raise when permission denied.''' + + default_message = ( + 'Cannot {operation} {resource_identifier}. Permission denied.' + ) + + +class AccessorResourceIdentifierError(AccessorError): + '''Raise when a error related to a resource_identifier occurs.''' + + default_message = 'Resource identifier is invalid: {resource_identifier}.' + + def __init__(self, resource_identifier, **kw): + kw.setdefault('details', {}).update(dict( + resource_identifier=resource_identifier + )) + super(AccessorResourceIdentifierError, self).__init__(**kw) + + +class AccessorFilesystemPathError(AccessorResourceIdentifierError): + '''Raise when a error related to an accessor filesystem path occurs.''' + + default_message = ( + 'Could not determine filesystem path from resource identifier: ' + '{resource_identifier}.' + ) + + +class AccessorResourceError(AccessorError): + '''Base for errors associated with specific resource.''' + + default_message = 'Unspecified resource error: {resource_identifier}' + + def __init__(self, operation='', resource_identifier=None, error=None, + **kw): + kw.setdefault('details', {}).update(dict( + operation=operation, + resource_identifier=resource_identifier + )) + super(AccessorResourceError, self).__init__(**kw) + + +class AccessorResourceNotFoundError(AccessorResourceError): + '''Raise when a required resource is not found.''' + + default_message = 'Resource not found: {resource_identifier}' + + +class AccessorParentResourceNotFoundError(AccessorResourceError): + '''Raise when a parent resource (such as directory) is not found.''' + + default_message = 'Parent resource is missing: {resource_identifier}' + + +class AccessorResourceInvalidError(AccessorResourceError): + '''Raise when a resource is not the right type.''' + + default_message = 'Resource invalid: {resource_identifier}' + + +class AccessorContainerNotEmptyError(AccessorResourceError): + '''Raise when container is not empty.''' + + default_message = 'Container is not empty: {resource_identifier}' + + +class StructureError(Error): + '''Base for errors associated with structures.''' + + default_message = 'Unspecified structure error' + + +class ConnectionClosedError(Error): + '''Raise when attempt to use closed connection detected.''' + + default_message = "Connection closed." 
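
Each error class above resolves its message by formatting 'default_message'
with its 'details' mapping (see Error.__str__), so string conversion
interpolates any supplied detail values. A minimal usage sketch, assuming
only the classes defined in this file::

    error = AccessorResourceNotFoundError(
        resource_identifier='/tmp/missing.txt'
    )
    assert str(error) == 'Resource not found: /tmp/missing.txt'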
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/formatter.py b/pype/modules/ftrack/python2_vendor/ftrack_api/formatter.py
new file mode 100644
index 0000000000..619c39c6eb
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/formatter.py
@@ -0,0 +1,132 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+from builtins import str
+import termcolor
+
+import ftrack_api.entity.base
+import ftrack_api.collection
+import ftrack_api.symbol
+import ftrack_api.inspection
+
+
+#: Useful filters to pass to :func:`format`.
+FILTER = {
+    'ignore_unset': (
+        lambda entity, name, value: value is not ftrack_api.symbol.NOT_SET
+    )
+}
+
+
+def format(
+    entity, formatters=None, attribute_filter=None, recursive=False,
+    indent=0, indent_first_line=True, _seen=None
+):
+    '''Return formatted string representing *entity*.
+
+    *formatters* can be used to customise formatting of elements. It should be a
+    mapping with one or more of the following keys:
+
+    * header - Used to format entity type.
+    * label - Used to format attribute names.
+
+    Specify an *attribute_filter* to control which attributes to include. By
+    default all attributes are included. The *attribute_filter* should be a
+    callable that accepts `(entity, attribute_name, attribute_value)` and
+    returns True if the attribute should be included in the output. For example,
+    to filter out all unset values::
+
+        attribute_filter=ftrack_api.formatter.FILTER['ignore_unset']
+
+    If *recursive* is True then recurse into Collections and format each entity
+    present.
+
+    *indent* specifies the overall indentation in spaces of the formatted text,
+    whilst *indent_first_line* determines whether to apply that indent to the
+    first generated line.
+
+    .. warning::
+
+        Iterates over all *entity* attributes which may cause multiple queries
+        to the server. Turn off auto populating in the session to prevent this.

+    '''
+    # Initialise default formatters.
+    if formatters is None:
+        formatters = dict()
+
+    formatters.setdefault(
+        'header', lambda text: termcolor.colored(
+            text, 'white', 'on_blue', attrs=['bold']
+        )
+    )
+    formatters.setdefault(
+        'label', lambda text: termcolor.colored(
+            text, 'blue', attrs=['bold']
+        )
+    )
+
+    # Determine indents.
+    spacer = ' ' * indent
+    if indent_first_line:
+        first_line_spacer = spacer
+    else:
+        first_line_spacer = ''
+
+    # Avoid infinite recursion on circular references.
+ if _seen is None: + _seen = set() + + identifier = str(ftrack_api.inspection.identity(entity)) + if identifier in _seen: + return ( + first_line_spacer + + formatters['header'](entity.entity_type) + '{...}' + ) + + _seen.add(identifier) + information = list() + + information.append( + first_line_spacer + formatters['header'](entity.entity_type) + ) + for key, value in sorted(entity.items()): + if attribute_filter is not None: + if not attribute_filter(entity, key, value): + continue + + child_indent = indent + len(key) + 3 + + if isinstance(value, ftrack_api.entity.base.Entity): + value = format( + value, + formatters=formatters, + attribute_filter=attribute_filter, + recursive=recursive, + indent=child_indent, + indent_first_line=False, + _seen=_seen.copy() + ) + + if isinstance(value, ftrack_api.collection.Collection): + if recursive: + child_values = [] + for index, child in enumerate(value): + child_value = format( + child, + formatters=formatters, + attribute_filter=attribute_filter, + recursive=recursive, + indent=child_indent, + indent_first_line=index != 0, + _seen=_seen.copy() + ) + child_values.append(child_value) + + value = '\n'.join(child_values) + + information.append( + spacer + u' {0}: {1}'.format(formatters['label'](key), value) + ) + + return '\n'.join(information) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/inspection.py b/pype/modules/ftrack/python2_vendor/ftrack_api/inspection.py new file mode 100644 index 0000000000..cde648d2e0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/inspection.py @@ -0,0 +1,138 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +from builtins import str +from future.utils import native_str +import collections + +import ftrack_api.symbol +import ftrack_api.operation + + +def identity(entity): + '''Return unique identity of *entity*.''' + return ( + str(entity.entity_type), + list(primary_key(entity).values()) + ) + + +def primary_key(entity): + '''Return primary key of *entity* as an ordered mapping of {field: value}. + + To get just the primary key values:: + + primary_key(entity).values() + + ''' + primary_key = collections.OrderedDict() + for name in entity.primary_key_attributes: + value = entity[name] + if value is ftrack_api.symbol.NOT_SET: + raise KeyError( + 'Missing required value for primary key attribute "{0}" on ' + 'entity {1!r}.'.format(name, entity) + ) + + # todo: Compatibility fix, review for better implementation. + primary_key[native_str(name)] = native_str(value) + + return primary_key + + +def _state(operation, state): + '''Return state following *operation* against current *state*.''' + if ( + isinstance( + operation, ftrack_api.operation.CreateEntityOperation + ) + and state is ftrack_api.symbol.NOT_SET + ): + state = ftrack_api.symbol.CREATED + + elif ( + isinstance( + operation, ftrack_api.operation.UpdateEntityOperation + ) + and state is ftrack_api.symbol.NOT_SET + ): + state = ftrack_api.symbol.MODIFIED + + elif isinstance( + operation, ftrack_api.operation.DeleteEntityOperation + ): + state = ftrack_api.symbol.DELETED + + return state + + +def state(entity): + '''Return current *entity* state. + + .. seealso:: :func:`ftrack_api.inspection.states`. + + ''' + value = ftrack_api.symbol.NOT_SET + + for operation in entity.session.recorded_operations: + # Determine if operation refers to an entity and whether that entity + # is *entity*.
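+        # Matching is by entity type and primary key rather than object
+        # identity, so separate instances that represent the same record
+        # are treated as one entity.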
+ if ( + isinstance( + operation, + ( + ftrack_api.operation.CreateEntityOperation, + ftrack_api.operation.UpdateEntityOperation, + ftrack_api.operation.DeleteEntityOperation + ) + ) + and operation.entity_type == entity.entity_type + and operation.entity_key == primary_key(entity) + ): + value = _state(operation, value) + + return value + + +def states(entities): + '''Return current states of *entities*. + + An optimised function for determining states of multiple entities in one + go. + + .. note:: + + All *entities* should belong to the same session. + + .. seealso:: :func:`ftrack_api.inspection.state`. + + ''' + if not entities: + return [] + + session = entities[0].session + + entities_by_identity = collections.OrderedDict() + for entity in entities: + key = (entity.entity_type, str(list(primary_key(entity).values()))) + entities_by_identity[key] = ftrack_api.symbol.NOT_SET + + for operation in session.recorded_operations: + if ( + isinstance( + operation, + ( + ftrack_api.operation.CreateEntityOperation, + ftrack_api.operation.UpdateEntityOperation, + ftrack_api.operation.DeleteEntityOperation + ) + ) + ): + key = (operation.entity_type, str(list(operation.entity_key.values()))) + if key not in entities_by_identity: + continue + + value = _state(operation, entities_by_identity[key]) + entities_by_identity[key] = value + + return list(entities_by_identity.values()) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/logging.py b/pype/modules/ftrack/python2_vendor/ftrack_api/logging.py new file mode 100644 index 0000000000..b287049637 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/logging.py @@ -0,0 +1,43 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2016 ftrack + +from builtins import object +import functools +import warnings + + +def deprecation_warning(message): + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + message, + PendingDeprecationWarning + ) + return function(*args, **kwargs) + return wrapper + + return decorator + +class LazyLogMessage(object): + '''A log message that can be evaluated lazily for improved performance. + + Example:: + + # Formatting of string will not occur unless debug logging enabled. 
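+        # Only the ``format`` call is deferred until the message is rendered;
+        # the arguments themselves are still evaluated eagerly.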
+ logger.debug(LazyLogMessage( + 'Hello {0}', 'world' + )) + + ''' + + def __init__(self, message, *args, **kwargs): + '''Initialise with *message* format string and arguments.''' + self.message = message + self.args = args + self.kwargs = kwargs + + def __str__(self): + '''Return string representation.''' + return self.message.format(*self.args, **self.kwargs) + diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/operation.py b/pype/modules/ftrack/python2_vendor/ftrack_api/operation.py new file mode 100644 index 0000000000..521712c046 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/operation.py @@ -0,0 +1,116 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +from builtins import object +import copy + + +class Operations(object): + '''Stack of operations.''' + + def __init__(self): + '''Initialise stack.''' + self._stack = [] + super(Operations, self).__init__() + + def clear(self): + '''Clear all operations.''' + del self._stack[:] + + def push(self, operation): + '''Push *operation* onto stack.''' + self._stack.append(operation) + + def pop(self): + '''Pop and return most recent operation from stack.''' + return self._stack.pop() + + def __len__(self): + '''Return count of operations.''' + return len(self._stack) + + def __iter__(self): + '''Return iterator over operations.''' + return iter(self._stack) + + +class Operation(object): + '''Represent an operation.''' + + +class CreateEntityOperation(Operation): + '''Represent create entity operation.''' + + def __init__(self, entity_type, entity_key, entity_data): + '''Initialise operation. + + *entity_type* should be the type of entity in string form (as returned + from :attr:`ftrack_api.entity.base.Entity.entity_type`). + + *entity_key* should be the unique key for the entity and should follow + the form returned from :func:`ftrack_api.inspection.primary_key`. + + *entity_data* should be a mapping of the initial data to populate the + entity with when creating. + + .. note:: + + Shallow copies will be made of each value in *entity_data*. + + ''' + super(CreateEntityOperation, self).__init__() + self.entity_type = entity_type + self.entity_key = entity_key + self.entity_data = {} + for key, value in list(entity_data.items()): + self.entity_data[key] = copy.copy(value) + + +class UpdateEntityOperation(Operation): + '''Represent update entity operation.''' + + def __init__( + self, entity_type, entity_key, attribute_name, old_value, new_value + ): + '''Initialise operation. + + *entity_type* should be the type of entity in string form (as returned + from :attr:`ftrack_api.entity.base.Entity.entity_type`). + + *entity_key* should be the unique key for the entity and should follow + the form returned from :func:`ftrack_api.inspection.primary_key`. + + *attribute_name* should be the string name of the attribute being + modified and *old_value* and *new_value* should reflect the change in + value. + + .. note:: + + Shallow copies will be made of both *old_value* and *new_value*. + + ''' + super(UpdateEntityOperation, self).__init__() + self.entity_type = entity_type + self.entity_key = entity_key + self.attribute_name = attribute_name + self.old_value = copy.copy(old_value) + self.new_value = copy.copy(new_value) + + +class DeleteEntityOperation(Operation): + '''Represent delete entity operation.''' + + def __init__(self, entity_type, entity_key): + '''Initialise operation. + + *entity_type* should be the type of entity in string form (as returned + from :attr:`ftrack_api.entity.base.Entity.entity_type`). 
+ + *entity_key* should be the unique key for the entity and should follow + the form returned from :func:`ftrack_api.inspection.primary_key`. + + ''' + super(DeleteEntityOperation, self).__init__() + self.entity_type = entity_type + self.entity_key = entity_key + diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/plugin.py b/pype/modules/ftrack/python2_vendor/ftrack_api/plugin.py new file mode 100644 index 0000000000..ab1142685c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/plugin.py @@ -0,0 +1,123 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from __future__ import absolute_import + +import logging +import os +import uuid +import imp +import inspect +import traceback + +def discover(paths, positional_arguments=None, keyword_arguments=None): + '''Find and load plugins in search *paths*. + + Each discovered module should implement a register function that accepts + *positional_arguments* and *keyword_arguments* as \*args and \*\*kwargs + respectively. + + If a register function does not accept variable arguments, then attempt to + only pass accepted arguments to the function by inspecting its signature. + + ''' + logger = logging.getLogger(__name__ + '.discover') + + if positional_arguments is None: + positional_arguments = [] + + if keyword_arguments is None: + keyword_arguments = {} + + for path in paths: + # Ignore empty paths that could resolve to current directory. + path = path.strip() + if not path: + continue + + for base, directories, filenames in os.walk(path): + for filename in filenames: + name, extension = os.path.splitext(filename) + if extension != '.py': + continue + + module_path = os.path.join(base, filename) + unique_name = uuid.uuid4().hex + + try: + module = imp.load_source(unique_name, module_path) + except Exception as error: + logger.warning( + 'Failed to load plugin from "{0}": {1}' + .format(module_path, error) + ) + logger.debug(traceback.format_exc()) + continue + + try: + module.register + except AttributeError: + logger.warning( + 'Failed to load plugin that did not define a ' + '"register" function at the module level: {0}' + .format(module_path) + ) + else: + # Attempt to only pass arguments that are accepted by the + # register function. + specification = inspect.getargspec(module.register) + + selected_positional_arguments = positional_arguments + selected_keyword_arguments = keyword_arguments + + if ( + not specification.varargs and + len(positional_arguments) > len(specification.args) + ): + logger.warning( + 'Culling passed arguments to match register ' + 'function signature.' + ) + + # Keep only as many positional arguments as the + # register function accepts. + selected_positional_arguments = positional_arguments[ + :len(specification.args) + ] + selected_keyword_arguments = {} + + elif not specification.keywords: + # Remove arguments that have been passed as positionals. + remainder = specification.args[ + len(positional_arguments): + ] + + # Determine remaining available keyword arguments. + defined_keyword_arguments = [] + if specification.defaults: + defined_keyword_arguments = specification.args[ + -len(specification.defaults): + ] + + remaining_keyword_arguments = set([ + keyword_argument for keyword_argument + in defined_keyword_arguments + if keyword_argument in remainder + ]) + + if not set(keyword_arguments.keys()).issubset( + remaining_keyword_arguments + ): + logger.warning( + 'Culling passed arguments to match register ' + 'function signature.'
+ ) + selected_keyword_arguments = { + key: value + for key, value in list(keyword_arguments.items()) + if key in remaining_keyword_arguments + } + + module.register( + *selected_positional_arguments, + **selected_keyword_arguments + ) diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/query.py b/pype/modules/ftrack/python2_vendor/ftrack_api/query.py new file mode 100644 index 0000000000..ea101a29d4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/query.py @@ -0,0 +1,202 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +import re +import collections + +import ftrack_api.exception + + +class QueryResult(collections.Sequence): + '''Results from a query.''' + + OFFSET_EXPRESSION = re.compile('(?P<offset>offset (?P<value>\d+))') + LIMIT_EXPRESSION = re.compile('(?P<limit>limit (?P<value>\d+))') + + def __init__(self, session, expression, page_size=500): + '''Initialise result set. + + *session* should be an instance of :class:`ftrack_api.session.Session` + that will be used for executing the query *expression*. + + *page_size* should be an integer specifying the maximum number of + records to fetch in one request allowing the results to be fetched + incrementally in a transparent manner for optimal performance. Any + offset or limit specified in *expression* is honoured for the final + result set, but intermediate queries may be issued with different + offsets and limits in order to fetch pages. When an embedded limit is + smaller than the given *page_size* it will be used instead and no + paging will take place. + + .. warning:: + + Setting *page_size* to a very large amount may negatively impact + performance of not only the caller, but the server in general. + + ''' + super(QueryResult, self).__init__() + self._session = session + self._results = [] + + ( + self._expression, + self._offset, + self._limit + ) = self._extract_offset_and_limit(expression) + + self._page_size = page_size + if self._limit is not None and self._limit < self._page_size: + # Optimise case where embedded limit is less than fetching a + # single page. + self._page_size = self._limit + + self._next_offset = self._offset + if self._next_offset is None: + # Initialise with zero offset. + self._next_offset = 0 + + def _extract_offset_and_limit(self, expression): + '''Process *expression* extracting offset and limit. + + Return (expression, offset, limit).
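+
+        For example, ``'Task offset 10 limit 5'`` yields
+        ``('Task', 10, 5)``.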
+ + ''' + offset = None + match = self.OFFSET_EXPRESSION.search(expression) + if match: + offset = int(match.group('value')) + expression = ( + expression[:match.start('offset')] + + expression[match.end('offset'):] + ) + + limit = None + match = self.LIMIT_EXPRESSION.search(expression) + if match: + limit = int(match.group('value')) + expression = ( + expression[:match.start('limit')] + + expression[match.end('limit'):] + ) + + return expression.strip(), offset, limit + + def __getitem__(self, index): + '''Return value at *index*.''' + while self._can_fetch_more() and index >= len(self._results): + self._fetch_more() + + return self._results[index] + + def __len__(self): + '''Return number of items.''' + while self._can_fetch_more(): + self._fetch_more() + + return len(self._results) + + def _can_fetch_more(self): + '''Return whether more results are available to fetch.''' + return self._next_offset is not None + + def _fetch_more(self): + '''Fetch next page of results if available.''' + if not self._can_fetch_more(): + return + + expression = '{0} offset {1} limit {2}'.format( + self._expression, self._next_offset, self._page_size + ) + records, metadata = self._session._query(expression) + self._results.extend(records) + + if self._limit is not None and (len(self._results) >= self._limit): + # Original limit reached. + self._next_offset = None + del self._results[self._limit:] + else: + # Retrieve next page offset from returned metadata. + self._next_offset = metadata.get('next', {}).get('offset', None) + + def all(self): + '''Fetch and return all data.''' + return list(self) + + def one(self): + '''Return exactly one single result from query by applying a limit. + + Raise :exc:`ValueError` if an existing limit is already present in the + expression. + + Raise :exc:`ValueError` if an existing offset is already present in the + expression as offset is inappropriate when expecting a single item. + + Raise :exc:`~ftrack_api.exception.MultipleResultsFoundError` if more + than one result was available or + :exc:`~ftrack_api.exception.NoResultFoundError` if no results were + available. + + .. note:: + + Both errors subclass + :exc:`~ftrack_api.exception.IncorrectResultError` if you want to + catch only one error type. + + ''' + expression = self._expression + + if self._limit is not None: + raise ValueError( + 'Expression already contains a limit clause.' + ) + + if self._offset is not None: + raise ValueError( + 'Expression contains an offset clause which does not make ' + 'sense when selecting a single item.' + ) + + # Apply custom limit as optimisation. A limit of 2 is used rather than + # 1 so that it is possible to test for multiple matching entries + # case. + expression += ' limit 2' + + results, metadata = self._session._query(expression) + + if not results: + raise ftrack_api.exception.NoResultFoundError() + + if len(results) != 1: + raise ftrack_api.exception.MultipleResultsFoundError() + + return results[0] + + def first(self): + '''Return first matching result from query by applying a limit. + + Raise :exc:`ValueError` if an existing limit is already present in the + expression. + + If no matching result available return None. + + ''' + expression = self._expression + + if self._limit is not None: + raise ValueError( + 'Expression already contains a limit clause.' + ) + + # Apply custom offset if present. + if self._offset is not None: + expression += ' offset {0}'.format(self._offset) + + # Apply custom limit as optimisation. 
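+        # Unlike :meth:`one`, there is no need to fetch a second record to
+        # detect multiple matches, so a limit of 1 suffices.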
+ expression += ' limit 1' + + results, metadata = self._session._query(expression) + + if results: + return results[0] + + return None diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/__init__.py new file mode 100644 index 0000000000..1aab07ed77 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/__init__.py @@ -0,0 +1,2 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/base.py new file mode 100644 index 0000000000..b4cbbc3a2a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/resource_identifier_transformer/base.py @@ -0,0 +1,51 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + + +from builtins import object +class ResourceIdentifierTransformer(object): + '''Transform resource identifiers. + + Provide ability to modify resource identifier before it is stored centrally + (:meth:`encode`), or after it has been retrieved, but before it is used + locally (:meth:`decode`). + + For example, you might want to decompose paths into a set of key, value + pairs to store centrally and then compose a path from those values when + reading back. + + .. note:: + + This is separate from any transformations an + :class:`ftrack_api.accessor.base.Accessor` may perform and is targeted + towards common transformations. + + ''' + + def __init__(self, session): + '''Initialise resource identifier transformer. + + *session* should be the :class:`ftrack_api.session.Session` instance + to use for communication with the server. + + ''' + self.session = session + super(ResourceIdentifierTransformer, self).__init__() + + def encode(self, resource_identifier, context=None): + '''Return encoded *resource_identifier* for storing centrally. + + A mapping of *context* values may be supplied to guide the + transformation. + + ''' + return resource_identifier + + def decode(self, resource_identifier, context=None): + '''Return decoded *resource_identifier* for use locally. + + A mapping of *context* values may be supplied to guide the + transformation. 
+ + ''' + return resource_identifier diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/session.py b/pype/modules/ftrack/python2_vendor/ftrack_api/session.py new file mode 100644 index 0000000000..55d4047c78 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/session.py @@ -0,0 +1,2468 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from __future__ import absolute_import +from __future__ import division + +from builtins import zip +from builtins import map +from builtins import str +from six import string_types +from builtins import object +import json +import logging +import collections +import datetime +import os +import getpass +import functools +import itertools +import distutils.version +import hashlib +import tempfile +import threading +import atexit +import warnings + +import requests +import requests.auth +import arrow +import clique +import appdirs + +import ftrack_api +import ftrack_api.exception +import ftrack_api.entity.factory +import ftrack_api.entity.base +import ftrack_api.entity.location +import ftrack_api.cache +import ftrack_api.symbol +import ftrack_api.query +import ftrack_api.attribute +import ftrack_api.collection +import ftrack_api.event.hub +import ftrack_api.event.base +import ftrack_api.plugin +import ftrack_api.inspection +import ftrack_api.operation +import ftrack_api.accessor.disk +import ftrack_api.structure.origin +import ftrack_api.structure.entity_id +import ftrack_api.accessor.server +import ftrack_api._centralized_storage_scenario +import ftrack_api.logging +from ftrack_api.logging import LazyLogMessage as L + +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api._weakref import WeakMethod + + +class SessionAuthentication(requests.auth.AuthBase): + '''Attach ftrack session authentication information to requests.''' + + def __init__(self, api_key, api_user): + '''Initialise with *api_key* and *api_user*.''' + self.api_key = api_key + self.api_user = api_user + super(SessionAuthentication, self).__init__() + + def __call__(self, request): + '''Modify *request* to have appropriate headers.''' + request.headers.update({ + 'ftrack-api-key': self.api_key, + 'ftrack-user': self.api_user + }) + return request + + +class Session(object): + '''An isolated session for interaction with an ftrack server.''' + + def __init__( + self, server_url=None, api_key=None, api_user=None, auto_populate=True, + plugin_paths=None, cache=None, cache_key_maker=None, + auto_connect_event_hub=False, schema_cache_path=None, + plugin_arguments=None, timeout=60 + ): + '''Initialise session. + + *server_url* should be the URL of the ftrack server to connect to + including any port number. If not specified attempt to look up from + :envvar:`FTRACK_SERVER`. + + *api_key* should be the API key to use for authentication whilst + *api_user* should be the username of the user in ftrack to record + operations against. If not specified, *api_key* should be retrieved + from :envvar:`FTRACK_API_KEY` and *api_user* from + :envvar:`FTRACK_API_USER`. + + If *auto_populate* is True (the default), then accessing entity + attributes will cause them to be automatically fetched from the server + if they are not already. This flag can be changed on the session + directly at any time. + + *plugin_paths* should be a list of paths to search for plugins. If not + specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`. 
+ + *cache* should be an instance of a cache that fulfils the + :class:`ftrack_api.cache.Cache` interface and will be used as the cache + for the session. It can also be a callable that will be called with the + session instance as sole argument. The callable should return ``None`` + if a suitable cache could not be configured, but session instantiation + can continue safely. + + .. note:: + + The session will add the specified cache to a pre-configured layered + cache that specifies the top level cache as a + :class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary + to construct a separate memory cache for typical behaviour. Working + around this behaviour or removing the memory cache can lead to + unexpected behaviour. + + *cache_key_maker* should be an instance of a key maker that fulfils the + :class:`ftrack_api.cache.KeyMaker` interface and will be used to + generate keys for objects being stored in the *cache*. If not specified, + a :class:`~ftrack_api.cache.StringKeyMaker` will be used. + + If *auto_connect_event_hub* is True then embedded event hub will be + automatically connected to the event server and allow for publishing and + subscribing to **non-local** events. If False, then only publishing and + subscribing to **local** events will be possible until the hub is + manually connected using :meth:`EventHub.connect + <ftrack_api.event.hub.EventHub.connect>`. + + .. note:: + + The event hub connection is performed in a background thread to + improve session startup time. If a registered plugin requires a + connected event hub then it should check the event hub connection + status explicitly. Subscribing to events does *not* require a + connected event hub. + + Enable schema caching by setting *schema_cache_path* to a folder path. + If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to + determine the path to store cache in. If the environment variable is + also not specified then a local cache directory will be used. Set to + `False` to disable schema caching entirely. + + *plugin_arguments* should be an optional mapping (dict) of keyword + arguments to pass to plugin register functions upon discovery. If a + discovered plugin has a signature that is incompatible with the passed + arguments, the discovery mechanism will attempt to reduce the passed + arguments to only those that the plugin accepts. Note that a warning + will be logged in this case. + + *timeout* specifies how long to wait for the server to respond; the + default is 60 seconds. + + ''' + super(Session, self).__init__() + self.logger = logging.getLogger( + __name__ + '.' + self.__class__.__name__ + ) + self._closed = False + + if server_url is None: + server_url = os.environ.get('FTRACK_SERVER') + + if not server_url: + raise TypeError( + 'Required "server_url" not specified. Pass as argument or set ' + 'in environment variable FTRACK_SERVER.' + ) + + self._server_url = server_url + + if api_key is None: + api_key = os.environ.get( + 'FTRACK_API_KEY', + # Backwards compatibility + os.environ.get('FTRACK_APIKEY') + ) + + if not api_key: + raise TypeError( + 'Required "api_key" not specified. Pass as argument or set in ' + 'environment variable FTRACK_API_KEY.' + ) + + self._api_key = api_key + + if api_user is None: + api_user = os.environ.get('FTRACK_API_USER') + if not api_user: + try: + api_user = getpass.getuser() + except Exception: + pass + + if not api_user: + raise TypeError( + 'Required "api_user" not specified. 
Pass as argument, set in ' + 'environment variable FTRACK_API_USER or one of the standard ' + 'environment variables used by Python\'s getpass module.' + ) + + self._api_user = api_user + + # Currently pending operations. + self.recorded_operations = ftrack_api.operation.Operations() + self.record_operations = True + + self.cache_key_maker = cache_key_maker + if self.cache_key_maker is None: + self.cache_key_maker = ftrack_api.cache.StringKeyMaker() + + # Enforce always having a memory cache at top level so that the same + # in-memory instance is returned from session. + self.cache = ftrack_api.cache.LayeredCache([ + ftrack_api.cache.MemoryCache() + ]) + + if cache is not None: + if callable(cache): + cache = cache(self) + + if cache is not None: + self.cache.caches.append(cache) + + self._managed_request = None + self._request = requests.Session() + self._request.auth = SessionAuthentication( + self._api_key, self._api_user + ) + self.request_timeout = timeout + + self.auto_populate = auto_populate + + # Fetch server information and in doing so also check credentials. + self._server_information = self._fetch_server_information() + + # Now check compatibility of server based on retrieved information. + self.check_server_compatibility() + + # Construct event hub and load plugins. + self._event_hub = ftrack_api.event.hub.EventHub( + self._server_url, + self._api_user, + self._api_key, + ) + + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub: + # Connect to event hub in background thread so as not to block main + # session usage waiting for event hub connection. + self._auto_connect_event_hub_thread = threading.Thread( + target=self._event_hub.connect + ) + self._auto_connect_event_hub_thread.daemon = True + self._auto_connect_event_hub_thread.start() + + # Register to auto-close session on exit. + atexit.register(WeakMethod(self.close)) + + self._plugin_paths = plugin_paths + if self._plugin_paths is None: + self._plugin_paths = os.environ.get( + 'FTRACK_EVENT_PLUGIN_PATH', '' + ).split(os.pathsep) + + self._discover_plugins(plugin_arguments=plugin_arguments) + + # TODO: Make schemas read-only and non-mutable (or at least without + # rebuilding types)? + if schema_cache_path is not False: + if schema_cache_path is None: + schema_cache_path = appdirs.user_cache_dir() + schema_cache_path = os.environ.get( + 'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path + ) + + schema_cache_path = os.path.join( + schema_cache_path, 'ftrack_api_schema_cache.json' + ) + + self.schemas = self._load_schemas(schema_cache_path) + self.types = self._build_entity_type_classes(self.schemas) + + ftrack_api._centralized_storage_scenario.register(self) + + self._configure_locations() + self.event_hub.publish( + ftrack_api.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) + + def __enter__(self): + '''Return session as context manager.''' + return self + + def __exit__(self, exception_type, exception_value, traceback): + '''Exit session context, closing session in process.''' + self.close() + + @property + def _request(self): + '''Return request session. + + Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has + been closed and connection unavailable. 
+ + ''' + if self._managed_request is None: + raise ftrack_api.exception.ConnectionClosedError() + + return self._managed_request + + @_request.setter + def _request(self, value): + '''Set request session to *value*.''' + self._managed_request = value + + @property + def closed(self): + '''Return whether session has been closed.''' + return self._closed + + @property + def server_information(self): + '''Return server information such as server version.''' + return self._server_information.copy() + + @property + def server_url(self): + '''Return server URL used for session.''' + return self._server_url + + @property + def api_user(self): + '''Return username used for session.''' + return self._api_user + + @property + def api_key(self): + '''Return API key used for session.''' + return self._api_key + + @property + def event_hub(self): + '''Return event hub.''' + return self._event_hub + + @property + def _local_cache(self): + '''Return top level memory cache.''' + return self.cache.caches[0] + + def check_server_compatibility(self): + '''Check compatibility with connected server.''' + server_version = self.server_information.get('version') + if server_version is None: + raise ftrack_api.exception.ServerCompatibilityError( + 'Could not determine server version.' + ) + + # Perform basic version check. + if server_version != 'dev': + min_server_version = '3.3.11' + if ( + distutils.version.LooseVersion(min_server_version) + > distutils.version.LooseVersion(server_version) + ): + raise ftrack_api.exception.ServerCompatibilityError( + 'Server version {0} incompatible with this version of the ' + 'API which requires a server version >= {1}'.format( + server_version, + min_server_version + ) + ) + + def close(self): + '''Close session. + + Close connections to server. Clear any pending operations and local + cache. + + Use this to ensure that session is cleaned up properly after use. + + ''' + if self.closed: + self.logger.debug('Session already closed.') + return + + self._closed = True + + self.logger.debug('Closing session.') + if self.recorded_operations: + self.logger.warning( + 'Closing session with pending operations not persisted.' + ) + + # Clear pending operations. + self.recorded_operations.clear() + + # Clear top level cache (expected to be enforced memory cache). + self._local_cache.clear() + + # Close connections. + self._request.close() + self._request = None + + try: + self.event_hub.disconnect() + if self._auto_connect_event_hub_thread: + self._auto_connect_event_hub_thread.join() + except ftrack_api.exception.EventHubConnectionError: + pass + + self.logger.debug('Session closed.') + + def reset(self): + '''Reset session clearing local state. + + Clear all pending operations and expunge all entities from session. + + Also clear the local cache. If the cache used by the session is a + :class:`~ftrack_api.cache.LayeredCache` then only clear top level cache. + Otherwise, clear the entire cache. + + Plugins are not rediscovered or reinitialised, but certain plugin events + are re-emitted to properly configure session aspects that are dependent + on cache (such as location plugins). + + .. warning:: + + Previously attached entities are not reset in memory and will retain + their state, but should not be used. Doing so will cause errors. + + ''' + if self.recorded_operations: + self.logger.warning( + 'Resetting session with pending operations not persisted.' + ) + + # Clear pending operations.
+ self.recorded_operations.clear() + + # Clear top level cache (expected to be enforced memory cache). + self._local_cache.clear() + + # Re-configure certain session aspects that may be dependent on cache. + self._configure_locations() + + self.event_hub.publish( + ftrack_api.event.base.Event( + topic='ftrack.api.session.reset', + data=dict( + session=self + ) + ), + synchronous=True + ) + + def auto_populating(self, auto_populate): + '''Temporarily set auto populate to *auto_populate*. + + The current setting will be restored automatically when done. + + Example:: + + with session.auto_populating(False): + print entity['name'] + + ''' + return AutoPopulatingContext(self, auto_populate) + + def operation_recording(self, record_operations): + '''Temporarily set operation recording to *record_operations*. + + The current setting will be restored automatically when done. + + Example:: + + with session.operation_recording(False): + entity['name'] = 'change_not_recorded' + + ''' + return OperationRecordingContext(self, record_operations) + + @property + def created(self): + '''Return list of newly created entities.''' + entities = list(self._local_cache.values()) + states = ftrack_api.inspection.states(entities) + + return [ + entity for (entity, state) in zip(entities, states) + if state is ftrack_api.symbol.CREATED + ] + + @property + def modified(self): + '''Return list of locally modified entities.''' + entities = list(self._local_cache.values()) + states = ftrack_api.inspection.states(entities) + + return [ + entity for (entity, state) in zip(entities, states) + if state is ftrack_api.symbol.MODIFIED + ] + + @property + def deleted(self): + '''Return list of deleted entities.''' + entities = list(self._local_cache.values()) + states = ftrack_api.inspection.states(entities) + + return [ + entity for (entity, state) in zip(entities, states) + if state is ftrack_api.symbol.DELETED + ] + + def reset_remote(self, reset_type, entity=None): + '''Perform a server side reset. + + *reset_type* is a server side supported reset type, + passing the optional *entity* to perform the operation upon. + + Please refer to ftrack documentation for a complete list of + supported server side reset types. + ''' + + payload = { + 'action': 'reset_remote', + 'reset_type': reset_type + } + + if entity is not None: + payload.update({ + 'entity_type': entity.entity_type, + 'entity_key': entity.get('id') + }) + + result = self.call( + [payload] + ) + + return result[0]['data'] + + def create(self, entity_type, data=None, reconstructing=False): + '''Create and return an entity of *entity_type* with initial *data*. + + If specified, *data* should be a dictionary of key, value pairs that + should be used to populate attributes on the entity. + + If *reconstructing* is False then create a new entity setting + appropriate defaults for missing data. If True then reconstruct an + existing entity. + + Constructed entity will be automatically :meth:`merged + <Session.merge>` into the session.
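+
+        Example::
+
+            new_user = session.create('User', {'username': 'martin'})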
+ + ''' + entity = self._create(entity_type, data, reconstructing=reconstructing) + entity = self.merge(entity) + return entity + + def _create(self, entity_type, data, reconstructing): + '''Create and return an entity of *entity_type* with initial *data*.''' + try: + EntityTypeClass = self.types[entity_type] + except KeyError: + raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type) + + return EntityTypeClass(self, data=data, reconstructing=reconstructing) + + def ensure(self, entity_type, data, identifying_keys=None): + '''Retrieve entity of *entity_type* with *data*, creating if necessary. + + *data* should be a dictionary of the same form passed to :meth:`create`. + + By default, check for an entity that has matching *data*. If + *identifying_keys* is specified as a list of keys then only consider the + values from *data* for those keys when searching for existing entity. If + *data* is missing an identifying key then raise :exc:`KeyError`. + + If no *identifying_keys* specified then use all of the keys from the + passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be + determined. + + Each key should be a string. + + .. note:: + + Currently only top level scalars supported. To ensure an entity by + looking at relationships, manually issue the :meth:`query` and + :meth:`create` calls. + + If more than one entity matches the determined filter criteria then + raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`. + + If no matching entity found then create entity using supplied *data*. + + If a matching entity is found, then update it if necessary with *data*. + + .. note:: + + If entity created or updated then a :meth:`commit` will be issued + automatically. If this behaviour is undesired, perform the + :meth:`query` and :meth:`create` calls manually. + + Return retrieved or created entity. + + Example:: + + # First time, a new entity with `username=martin` is created. + entity = session.ensure('User', {'username': 'martin'}) + + # After that, the existing entity is retrieved. + entity = session.ensure('User', {'username': 'martin'}) + + # When existing entity retrieved, entity may also be updated to + # match supplied data. + entity = session.ensure( + 'User', {'username': 'martin', 'email': 'martin@example.com'} + ) + + ''' + if not identifying_keys: + identifying_keys = list(data.keys()) + + self.logger.debug(L( + 'Ensuring entity {0!r} with data {1!r} using identifying keys ' + '{2!r}', entity_type, data, identifying_keys + )) + + if not identifying_keys: + raise ValueError( + 'Could not determine any identifying data to check against ' + 'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}' + .format(entity_type, data, identifying_keys) + ) + + expression = '{0} where'.format(entity_type) + criteria = [] + for identifying_key in identifying_keys: + value = data[identifying_key] + + if isinstance(value, string_types): + value = '"{0}"'.format(value) + + elif isinstance( + value, (arrow.Arrow, datetime.datetime, datetime.date) + ): + # Server does not store microsecond or timezone currently so + # need to strip from query. + # TODO: When datetime handling improved, update this logic. 
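+                # For example, datetime(2020, 1, 1, 12, 0, 0, 123456)
+                # becomes '2020-01-01T12:00:00' before being quoted below.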
+ value = ( + arrow.get(value).naive.replace(microsecond=0).isoformat() + ) + value = '"{0}"'.format(value) + + criteria.append('{0} is {1}'.format(identifying_key, value)) + + expression = '{0} {1}'.format( + expression, ' and '.join(criteria) + ) + + try: + entity = self.query(expression).one() + + except ftrack_api.exception.NoResultFoundError: + self.logger.debug('Creating entity as did not already exist.') + + # Create entity. + entity = self.create(entity_type, data) + self.commit() + + else: + self.logger.debug('Retrieved matching existing entity.') + + # Update entity if required. + updated = False + for key, target_value in list(data.items()): + if entity[key] != target_value: + entity[key] = target_value + updated = True + + if updated: + self.logger.debug('Updating existing entity to match new data.') + self.commit() + + return entity + + def delete(self, entity): + '''Mark *entity* for deletion.''' + if self.record_operations: + self.recorded_operations.push( + ftrack_api.operation.DeleteEntityOperation( + entity.entity_type, + ftrack_api.inspection.primary_key(entity) + ) + ) + + def get(self, entity_type, entity_key): + '''Return entity of *entity_type* with unique *entity_key*. + + First check for an existing entry in the configured cache, otherwise + issue a query to the server. + + If no matching entity found, return None. + + ''' + self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key)) + + primary_key_definition = self.types[entity_type].primary_key_attributes + if isinstance(entity_key, string_types): + entity_key = [entity_key] + + if len(entity_key) != len(primary_key_definition): + raise ValueError( + 'Incompatible entity_key {0!r} supplied. Entity type {1} ' + 'expects a primary key composed of {2} values ({3}).' + .format( + entity_key, entity_type, len(primary_key_definition), + ', '.join(primary_key_definition) + ) + ) + + entity = None + try: + entity = self._get(entity_type, entity_key) + + + except KeyError: + + # Query for matching entity. + self.logger.debug( + 'Entity not present in cache. Issuing new query.' + ) + condition = [] + for key, value in zip(primary_key_definition, entity_key): + condition.append('{0} is "{1}"'.format(key, value)) + + expression = '{0} where ({1})'.format( + entity_type, ' and '.join(condition) + ) + + results = self.query(expression).all() + if results: + entity = results[0] + + return entity + + def _get(self, entity_type, entity_key): + '''Return cached entity of *entity_type* with unique *entity_key*. + + Raise :exc:`KeyError` if no such entity in the cache. + + ''' + # Check cache for existing entity emulating + # ftrack_api.inspection.identity result object to pass to key maker. + cache_key = self.cache_key_maker.key( + (str(entity_type), list(map(str, entity_key))) + ) + self.logger.debug(L( + 'Checking cache for entity with key {0}', cache_key + )) + entity = self.cache.get(cache_key) + self.logger.debug(L( + 'Retrieved existing entity from cache: {0} at {1}', + entity, id(entity) + )) + + return entity + + def query(self, expression, page_size=500): + '''Query against remote data according to *expression*. + + *expression* is not executed directly. Instead return an + :class:`ftrack_api.query.QueryResult` instance that will execute remote + call on access. + + *page_size* specifies the maximum page size that the returned query + result object should be configured with. + + .. 
seealso:: :ref:`querying` + + ''' + self.logger.debug(L('Query {0!r}', expression)) + + # Add in sensible projections if none specified. Note that this is + # done here rather than on the server to allow local modification of the + # schema setting to include commonly used custom attributes for example. + # TODO: Use a proper parser perhaps? + if not expression.startswith('select'): + entity_type = expression.split(' ', 1)[0] + EntityTypeClass = self.types[entity_type] + projections = EntityTypeClass.default_projections + + expression = 'select {0} from {1}'.format( + ', '.join(projections), + expression + ) + + query_result = ftrack_api.query.QueryResult( + self, expression, page_size=page_size + ) + return query_result + + def _query(self, expression): + '''Execute *expression* and return (records, metadata). + + Records will be a list of entities retrieved via the query and metadata + a dictionary of accompanying information about the result set. + + ''' + # TODO: Actually support batching several queries together. + # TODO: Should batches have unique ids to match them up later. + batch = [{ + 'action': 'query', + 'expression': expression + }] + + # TODO: When should this execute? How to handle background=True? + results = self.call(batch) + + # Merge entities into local cache and return merged entities. + data = [] + merged = dict() + for entity in results[0]['data']: + data.append(self._merge_recursive(entity, merged)) + + return data, results[0]['metadata'] + + def merge(self, value, merged=None): + '''Merge *value* into session and return merged value. + + *merged* should be a mapping to record merges during run and should be + used to avoid infinite recursion. If not set will default to a + dictionary. + + ''' + if merged is None: + merged = {} + + with self.operation_recording(False): + return self._merge(value, merged) + + def _merge(self, value, merged): + '''Return merged *value*.''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if isinstance(value, ftrack_api.entity.base.Entity): + log_debug and self.logger.debug( + 'Merging entity into session: {0} at {1}' + .format(value, id(value)) + ) + + return self._merge_entity(value, merged=merged) + + elif isinstance(value, ftrack_api.collection.Collection): + log_debug and self.logger.debug( + 'Merging collection into session: {0!r} at {1}' + .format(value, id(value)) + ) + + merged_collection = [] + for entry in value: + merged_collection.append( + self._merge(entry, merged=merged) + ) + + return merged_collection + + elif isinstance(value, ftrack_api.collection.MappedCollectionProxy): + log_debug and self.logger.debug( + 'Merging mapped collection into session: {0!r} at {1}' + .format(value, id(value)) + ) + + merged_collection = [] + for entry in value.collection: + merged_collection.append( + self._merge(entry, merged=merged) + ) + + return merged_collection + + else: + return value + + def _merge_recursive(self, entity, merged=None): + '''Merge *entity* and all its attributes recursively.''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + attached = self.merge(entity, merged) + + for attribute in entity.attributes: + # Remote attributes.
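+            # Only the remote (server supplied) value is inspected; any
+            # entity or collection it references is merged recursively so
+            # that the whole graph becomes attached to the session.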
+ remote_value = attribute.get_remote_value(entity) + + if isinstance( + remote_value, + ( + ftrack_api.entity.base.Entity, + ftrack_api.collection.Collection, + ftrack_api.collection.MappedCollectionProxy + ) + ): + log_debug and self.logger.debug( + 'Merging remote value for attribute {0}.'.format(attribute) + ) + + if isinstance(remote_value, ftrack_api.entity.base.Entity): + self._merge_recursive(remote_value, merged=merged) + + elif isinstance( + remote_value, ftrack_api.collection.Collection + ): + for entry in remote_value: + self._merge_recursive(entry, merged=merged) + + elif isinstance( + remote_value, ftrack_api.collection.MappedCollectionProxy + ): + for entry in remote_value.collection: + self._merge_recursive(entry, merged=merged) + + return attached + + def _merge_entity(self, entity, merged=None): + '''Merge *entity* into session returning merged entity. + + Merge is recursive so any references to other entities will also be + merged. + + *entity* will never be modified in place. Ensure that the returned + merged entity instance is used. + + ''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + with self.auto_populating(False): + entity_key = self.cache_key_maker.key( + ftrack_api.inspection.identity(entity) + ) + + # Check whether this entity has already been processed. + attached_entity = merged.get(entity_key) + if attached_entity is not None: + log_debug and self.logger.debug( + 'Entity already processed for key {0} as {1} at {2}' + .format(entity_key, attached_entity, id(attached_entity)) + ) + + return attached_entity + else: + log_debug and self.logger.debug( + 'Entity not already processed for key {0}.' + .format(entity_key) + ) + + # Check for existing instance of entity in cache. + log_debug and self.logger.debug( + 'Checking for entity in cache with key {0}'.format(entity_key) + ) + try: + attached_entity = self.cache.get(entity_key) + + log_debug and self.logger.debug( + 'Retrieved existing entity from cache: {0} at {1}' + .format(attached_entity, id(attached_entity)) + ) + + except KeyError: + # Construct new minimal instance to store in cache. + attached_entity = self._create( + entity.entity_type, {}, reconstructing=True + ) + + log_debug and self.logger.debug( + 'Entity not present in cache. Constructed new instance: ' + '{0} at {1}'.format(attached_entity, id(attached_entity)) + ) + + # Mark entity as seen to avoid infinite loops. + merged[entity_key] = attached_entity + + changes = attached_entity.merge(entity, merged=merged) + if changes: + self.cache.set(entity_key, attached_entity) + self.logger.debug('Cache updated with merged entity.') + + else: + self.logger.debug( + 'Cache not updated with merged entity as no differences ' + 'detected.' + ) + + return attached_entity + + def populate(self, entities, projections): + '''Populate *entities* with attributes specified by *projections*. + + Any locally set values included in the *projections* will not be + overwritten with the retrieved remote value. If this 'synchronise' + behaviour is required, first clear the relevant values on the entity by + setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will + have the same effect:: + + >>> print(user['username']) + martin + >>> del user['username'] + >>> print(user['username']) + Symbol(NOT_SET) + + .. note:: + + Entities that have been created and not yet persisted will be + skipped as they have no remote values to fetch. 
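+
+        Example::
+
+            users = session.query('User').all()
+            session.populate(users, 'username')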
+ + ''' + self.logger.debug(L( + 'Populate {0!r} projections for {1}.', projections, entities + )) + + if not isinstance( + entities, (list, tuple, ftrack_api.query.QueryResult) + ): + entities = [entities] + + # TODO: How to handle a mixed collection of different entity types + # Should probably fail, but need to consider handling hierarchies such + # as User and Group both deriving from Resource. Actually, could just + # proceed and ignore projections that are not present in entity type. + + entities_to_process = [] + + for entity in entities: + if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED: + # Created entities that are not yet persisted have no remote + # values. Don't raise an error here as it is reasonable to + # iterate over an entity's properties and see that some of them + # are NOT_SET. + self.logger.debug(L( + 'Skipping newly created entity {0!r} for population as no ' + 'data will exist in the remote for this entity yet.', entity + )) + continue + + entities_to_process.append(entity) + + if entities_to_process: + reference_entity = entities_to_process[0] + entity_type = reference_entity.entity_type + query = 'select {0} from {1}'.format(projections, entity_type) + + primary_key_definition = reference_entity.primary_key_attributes + entity_keys = [ + list(ftrack_api.inspection.primary_key(entity).values()) + for entity in entities_to_process + ] + + if len(primary_key_definition) > 1: + # Composite keys require full OR syntax unfortunately. + conditions = [] + for entity_key in entity_keys: + condition = [] + for key, value in zip(primary_key_definition, entity_key): + condition.append('{0} is "{1}"'.format(key, value)) + + conditions.append('({0})'.format(' and '.join(condition))) + + query = '{0} where {1}'.format(query, ' or '.join(conditions)) + + else: + primary_key = primary_key_definition[0] + + if len(entity_keys) > 1: + query = '{0} where {1} in ({2})'.format( + query, primary_key, + ','.join([ + str(entity_key[0]) for entity_key in entity_keys + ]) + ) + else: + query = '{0} where {1} is {2}'.format( + query, primary_key, str(entity_keys[0][0]) + ) + + result = self.query(query) + + # Fetch all results now. Doing so will cause them to populate the + # relevant entities in the cache. + result.all() + + # TODO: Should we check that all requested attributes were + # actually populated? If some weren't would we mark that to avoid + # repeated calls or perhaps raise an error? + + # TODO: Make atomic. + def commit(self): + '''Commit all local changes to the server.''' + batch = [] + + with self.auto_populating(False): + for operation in self.recorded_operations: + + # Convert operation to payload. + if isinstance( + operation, ftrack_api.operation.CreateEntityOperation + ): + # At present, data payload requires duplicating entity + # type in data and also ensuring primary key added. + entity_data = { + '__entity_type__': operation.entity_type, + } + entity_data.update(operation.entity_key) + entity_data.update(operation.entity_data) + + payload = OperationPayload({ + 'action': 'create', + 'entity_type': operation.entity_type, + 'entity_key': list(operation.entity_key.values()), + 'entity_data': entity_data + }) + + elif isinstance( + operation, ftrack_api.operation.UpdateEntityOperation + ): + entity_data = { + # At present, data payload requires duplicating entity + # type.
+ '__entity_type__': operation.entity_type, + operation.attribute_name: operation.new_value + } + + payload = OperationPayload({ + 'action': 'update', + 'entity_type': operation.entity_type, + 'entity_key': list(operation.entity_key.values()), + 'entity_data': entity_data + }) + + elif isinstance( + operation, ftrack_api.operation.DeleteEntityOperation + ): + payload = OperationPayload({ + 'action': 'delete', + 'entity_type': operation.entity_type, + 'entity_key': list(operation.entity_key.values()) + }) + + else: + raise ValueError( + 'Cannot commit. Unrecognised operation type {0} ' + 'detected.'.format(type(operation)) + ) + + batch.append(payload) + + # Optimise batch. + # TODO: Might be better to perform these on the operations list instead + # so all operation contextual information available. + + # If entity was created and deleted in one batch then remove all + # payloads for that entity. + created = set() + deleted = set() + + for payload in batch: + if payload['action'] == 'create': + created.add( + (payload['entity_type'], str(payload['entity_key'])) + ) + + elif payload['action'] == 'delete': + deleted.add( + (payload['entity_type'], str(payload['entity_key'])) + ) + + created_then_deleted = deleted.intersection(created) + if created_then_deleted: + optimised_batch = [] + for payload in batch: + entity_type = payload.get('entity_type') + entity_key = str(payload.get('entity_key')) + + if (entity_type, entity_key) in created_then_deleted: + continue + + optimised_batch.append(payload) + + batch = optimised_batch + + # Remove early update operations so that only last operation on + # attribute is applied server side. + updates_map = set() + for payload in reversed(batch): + if payload['action'] in ('update', ): + for key, value in list(payload['entity_data'].items()): + if key == '__entity_type__': + continue + + identity = ( + payload['entity_type'], str(payload['entity_key']), key + ) + if identity in updates_map: + del payload['entity_data'][key] + else: + updates_map.add(identity) + + # Remove NOT_SET values from entity_data. + for payload in batch: + entity_data = payload.get('entity_data', {}) + for key, value in list(entity_data.items()): + if value is ftrack_api.symbol.NOT_SET: + del entity_data[key] + + # Remove payloads with redundant entity_data. + optimised_batch = [] + for payload in batch: + entity_data = payload.get('entity_data') + if entity_data is not None: + keys = list(entity_data.keys()) + if not keys or keys == ['__entity_type__']: + continue + + optimised_batch.append(payload) + + batch = optimised_batch + + # Collapse updates that are consecutive into one payload. Also, collapse + # updates that occur immediately after creation into the create payload. + optimised_batch = [] + previous_payload = None + + for payload in batch: + if ( + previous_payload is not None + and payload['action'] == 'update' + and previous_payload['action'] in ('create', 'update') + and previous_payload['entity_type'] == payload['entity_type'] + and previous_payload['entity_key'] == payload['entity_key'] + ): + previous_payload['entity_data'].update(payload['entity_data']) + continue + + else: + optimised_batch.append(payload) + previous_payload = payload + + batch = optimised_batch + + # Process batch. + if batch: + result = self.call(batch) + + # Clear recorded operations. + self.recorded_operations.clear() + + # As optimisation, clear local values which are not primary keys to + # avoid redundant merges when merging references. 
Note: primary keys
+        # remain as needed for cache retrieval on new entities.
+        with self.auto_populating(False):
+            with self.operation_recording(False):
+                for entity in list(self._local_cache.values()):
+                    for attribute in entity:
+                        if attribute not in entity.primary_key_attributes:
+                            del entity[attribute]
+
+        # Process results merging into cache relevant data.
+        for entry in result:
+
+            if entry['action'] in ('create', 'update'):
+                # Merge returned entities into local cache.
+                self.merge(entry['data'])
+
+            elif entry['action'] == 'delete':
+                # TODO: Detach entity - need identity returned?
+                # TODO: Expunge entity from cache.
+                pass
+
+        # Clear remaining local state, including local values for primary
+        # keys on entities that were merged.
+        with self.auto_populating(False):
+            with self.operation_recording(False):
+                for entity in list(self._local_cache.values()):
+                    entity.clear()
+
+    def rollback(self):
+        '''Clear all recorded operations and local state.
+
+        Typically this would be used following a failed :meth:`commit` in
+        order to revert the session to a known good state.
+
+        Newly created entities not yet persisted will be detached from the
+        session / purged from cache and no longer contribute, but the actual
+        objects are not deleted from memory. They should no longer be used
+        and doing so could cause errors.
+
+        '''
+        with self.auto_populating(False):
+            with self.operation_recording(False):
+
+                # Detach all newly created entities and remove from cache. This
+                # is done because simply clearing the local values of newly
+                # created entities would result in entities with no identity as
+                # primary key was local while not persisted. In addition, it
+                # makes no sense for failed created entities to exist in session
+                # or cache.
+                for operation in self.recorded_operations:
+                    if isinstance(
+                        operation, ftrack_api.operation.CreateEntityOperation
+                    ):
+                        entity_key = str((
+                            str(operation.entity_type),
+                            list(operation.entity_key.values())
+                        ))
+                        try:
+                            self.cache.remove(entity_key)
+                        except KeyError:
+                            pass
+
+                # Clear locally stored modifications on remaining entities.
+                for entity in list(self._local_cache.values()):
+                    entity.clear()
+
+        self.recorded_operations.clear()
+
+    def _fetch_server_information(self):
+        '''Return server information.'''
+        result = self.call([{'action': 'query_server_information'}])
+        return result[0]
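A usage note for :meth:`commit` and :meth:`rollback` above: a minimal sketch, assuming a writable project and a session configured from the standard FTRACK_* environment variables::

    import ftrack_api
    import ftrack_api.exception

    session = ftrack_api.Session()
    project = session.query('Project').first()

    task = session.create('Task', {
        'name': 'compositing', 'parent': project
    })
    task['name'] = 'comp'  # Collapsed into the create payload on commit.

    try:
        # All recorded operations are sent as one optimised batch.
        session.commit()
    except ftrack_api.exception.ServerError:
        # Restore the session to a known good state; the never-persisted
        # Task is purged from the session cache.
        session.rollback()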
+
+    def _discover_plugins(self, plugin_arguments=None):
+        '''Find and load plugins in search paths.
+
+        Each discovered module should implement a register function that
+        accepts this session as first argument. Typically the function should
+        register appropriate event listeners against the session's event hub.
+
+            def register(session):
+                session.event_hub.subscribe(
+                    'topic=ftrack.api.session.construct-entity-type',
+                    construct_entity_type
+                )
+
+        *plugin_arguments* should be an optional mapping of keyword arguments
+        and values to pass to plugin register functions upon discovery.
+
+        '''
+        plugin_arguments = plugin_arguments or {}
+        ftrack_api.plugin.discover(
+            self._plugin_paths, [self], plugin_arguments
+        )
+
+    def _read_schemas_from_cache(self, schema_cache_path):
+        '''Return schemas and schema hash from *schema_cache_path*.
+
+        *schema_cache_path* should be the path to the file containing the
+        schemas in JSON format.
+
+        '''
+        self.logger.debug(L(
+            'Reading schemas from cache {0!r}', schema_cache_path
+        ))
+
+        if not os.path.exists(schema_cache_path):
+            self.logger.info(L(
+                'Cache file not found at {0!r}.', schema_cache_path
+            ))
+
+            return [], None
+
+        with open(schema_cache_path, 'r') as schema_file:
+            schemas = json.load(schema_file)
+            hash_ = hashlib.md5(
+                json.dumps(schemas, sort_keys=True).encode('utf-8')
+            ).hexdigest()
+
+        return schemas, hash_
+
+    def _write_schemas_to_cache(self, schemas, schema_cache_path):
+        '''Write *schemas* to *schema_cache_path*.
+
+        *schema_cache_path* should be a path to a file that the schemas can be
+        written to in JSON format.
+
+        '''
+        self.logger.debug(L(
+            'Updating schema cache {0!r} with new schemas.', schema_cache_path
+        ))
+
+        with open(schema_cache_path, 'w') as local_cache_file:
+            json.dump(schemas, local_cache_file, indent=4)
+
+    def _load_schemas(self, schema_cache_path):
+        '''Load schemas.
+
+        First try to load schemas from cache at *schema_cache_path*. If the
+        cache is not available or the cache appears outdated then load schemas
+        from server and store fresh copy in cache.
+
+        If *schema_cache_path* is set to `False`, always load schemas from
+        server bypassing cache.
+
+        '''
+        local_schema_hash = None
+        schemas = []
+
+        if schema_cache_path:
+            try:
+                schemas, local_schema_hash = self._read_schemas_from_cache(
+                    schema_cache_path
+                )
+            except (IOError, TypeError, AttributeError, ValueError):
+                # Catch any known exceptions when trying to read the local
+                # schema cache to prevent API from being unusable.
+                self.logger.exception(L(
+                    'Schema cache could not be loaded from {0!r}',
+                    schema_cache_path
+                ))
+
+        # Use `dictionary.get` to retrieve hash to support older versions of
+        # the ftrack server not returning a schema hash.
+        server_hash = self._server_information.get(
+            'schema_hash', False
+        )
+        if local_schema_hash != server_hash:
+            self.logger.debug(L(
+                'Loading schemas from server due to hash not matching. '
+                'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
+            ))
+            schemas = self.call([{'action': 'query_schemas'}])[0]
+
+            if schema_cache_path:
+                try:
+                    self._write_schemas_to_cache(schemas, schema_cache_path)
+                except (IOError, TypeError):
+                    self.logger.exception(L(
+                        'Failed to update schema cache {0!r}.',
+                        schema_cache_path
+                    ))
+
+        else:
+            self.logger.debug(L(
+                'Using cached schemas from {0!r}', schema_cache_path
+            ))
+
+        return schemas
+
+    def _build_entity_type_classes(self, schemas):
+        '''Build default entity type classes.'''
+        fallback_factory = ftrack_api.entity.factory.StandardFactory()
+        classes = {}
+
+        for schema in schemas:
+            results = self.event_hub.publish(
+                ftrack_api.event.base.Event(
+                    topic='ftrack.api.session.construct-entity-type',
+                    data=dict(
+                        schema=schema,
+                        schemas=schemas
+                    )
+                ),
+                synchronous=True
+            )
+
+            results = [result for result in results if result is not None]
+
+            if not results:
+                self.logger.debug(L(
+                    'Using default StandardFactory to construct entity type '
+                    'class for "{0}"', schema['id']
+                ))
+                entity_type_class = fallback_factory.create(schema)
+
+            elif len(results) > 1:
+                raise ValueError(
+                    'Expected single entity type to represent schema "{0}" but '
+                    'received {1} entity types instead.'
+ .format(schema['id'], len(results)) + ) + + else: + entity_type_class = results[0] + + classes[entity_type_class.entity_type] = entity_type_class + + return classes + + def _configure_locations(self): + '''Configure locations.''' + # First configure builtin locations, by injecting them into local cache. + + # Origin. + location = self.create( + 'Location', + data=dict( + name='ftrack.origin', + id=ftrack_api.symbol.ORIGIN_LOCATION_ID + ), + reconstructing=True + ) + ftrack_api.mixin( + location, ftrack_api.entity.location.OriginLocationMixin, + name='OriginLocation' + ) + location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='') + location.structure = ftrack_api.structure.origin.OriginStructure() + location.priority = 100 + + # Unmanaged. + location = self.create( + 'Location', + data=dict( + name='ftrack.unmanaged', + id=ftrack_api.symbol.UNMANAGED_LOCATION_ID + ), + reconstructing=True + ) + ftrack_api.mixin( + location, ftrack_api.entity.location.UnmanagedLocationMixin, + name='UnmanagedLocation' + ) + location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='') + location.structure = ftrack_api.structure.origin.OriginStructure() + # location.resource_identifier_transformer = ( + # ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session) + # ) + location.priority = 90 + + # Review. + location = self.create( + 'Location', + data=dict( + name='ftrack.review', + id=ftrack_api.symbol.REVIEW_LOCATION_ID + ), + reconstructing=True + ) + ftrack_api.mixin( + location, ftrack_api.entity.location.UnmanagedLocationMixin, + name='UnmanagedLocation' + ) + location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='') + location.structure = ftrack_api.structure.origin.OriginStructure() + location.priority = 110 + + # Server. + location = self.create( + 'Location', + data=dict( + name='ftrack.server', + id=ftrack_api.symbol.SERVER_LOCATION_ID + ), + reconstructing=True + ) + ftrack_api.mixin( + location, ftrack_api.entity.location.ServerLocationMixin, + name='ServerLocation' + ) + location.accessor = ftrack_api.accessor.server._ServerAccessor( + session=self + ) + location.structure = ftrack_api.structure.entity_id.EntityIdStructure() + location.priority = 150 + + # Master location based on server scenario. + storage_scenario = self.server_information.get('storage_scenario') + + if ( + storage_scenario and + storage_scenario.get('scenario') + ): + self.event_hub.publish( + ftrack_api.event.base.Event( + topic='ftrack.storage-scenario.activate', + data=dict( + storage_scenario=storage_scenario + ) + ), + synchronous=True + ) + + # Next, allow further configuration of locations via events. 
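The 'ftrack.api.session.configure-location' event published just below is the hook for attaching accessors and structures to studio-specific locations. A hedged sketch of such a plugin module; the location name 'studio.disk', the prefix and the priority are illustrative::

    import ftrack_api.accessor.disk
    import ftrack_api.structure.standard

    def configure_locations(event):
        session = event['data']['session']

        # Fetch or create the studio location and make it usable by
        # attaching an accessor and a structure.
        location = session.ensure('Location', {'name': 'studio.disk'})
        location.accessor = ftrack_api.accessor.disk.DiskAccessor(
            prefix='/mnt/projects'
        )
        location.structure = (
            ftrack_api.structure.standard.StandardStructure()
        )
        location.priority = 30

    def register(session):
        session.event_hub.subscribe(
            'topic=ftrack.api.session.configure-location',
            configure_locations
        )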
+ self.event_hub.publish( + ftrack_api.event.base.Event( + topic='ftrack.api.session.configure-location', + data=dict( + session=self + ) + ), + synchronous=True + ) + + def call(self, data): + '''Make request to server with *data* batch describing the actions.''' + url = self._server_url + '/api' + headers = { + 'content-type': 'application/json', + 'accept': 'application/json' + } + data = self.encode(data, entity_attribute_strategy='modified_only') + + self.logger.debug(L('Calling server {0} with {1!r}', url, data)) + + response = self._request.post( + url, + headers=headers, + data=data, + timeout=self.request_timeout, + ) + + self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds())) + + self.logger.debug(L('Response: {0!r}', response.text)) + try: + result = self.decode(response.text) + + except Exception: + error_message = ( + 'Server reported error in unexpected format. Raw error was: {0}' + .format(response.text) + ) + self.logger.exception(error_message) + raise ftrack_api.exception.ServerError(error_message) + + else: + if 'exception' in result: + # Handle exceptions. + error_message = 'Server reported error: {0}({1})'.format( + result['exception'], result['content'] + ) + self.logger.exception(error_message) + raise ftrack_api.exception.ServerError(error_message) + + return result + + def encode(self, data, entity_attribute_strategy='set_only'): + '''Return *data* encoded as JSON formatted string. + + *entity_attribute_strategy* specifies how entity attributes should be + handled. The following strategies are available: + + * *all* - Encode all attributes, loading any that are currently NOT_SET. + * *set_only* - Encode only attributes that are currently set without + loading any from the remote. + * *modified_only* - Encode only attributes that have been modified + locally. + * *persisted_only* - Encode only remote (persisted) attribute values. + + ''' + entity_attribute_strategies = ( + 'all', 'set_only', 'modified_only', 'persisted_only' + ) + if entity_attribute_strategy not in entity_attribute_strategies: + raise ValueError( + 'Unsupported entity_attribute_strategy "{0}". Must be one of ' + '{1}'.format( + entity_attribute_strategy, + ', '.join(entity_attribute_strategies) + ) + ) + + return json.dumps( + data, + sort_keys=True, + default=functools.partial( + self._encode, + entity_attribute_strategy=entity_attribute_strategy + ) + ) + + def _encode(self, item, entity_attribute_strategy='set_only'): + '''Return JSON encodable version of *item*. + + *entity_attribute_strategy* specifies how entity attributes should be + handled. See :meth:`Session.encode` for available strategies. 
+
+        '''
+        if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
+            return {
+                '__type__': 'datetime',
+                'value': item.isoformat()
+            }
+
+        if isinstance(item, OperationPayload):
+            data = dict(list(item.items()))
+            if "entity_data" in data:
+                for key, value in list(data["entity_data"].items()):
+                    if isinstance(value, ftrack_api.entity.base.Entity):
+                        data["entity_data"][key] = self.entity_reference(value)
+
+            return data
+
+        if isinstance(item, ftrack_api.entity.base.Entity):
+            data = self.entity_reference(item)
+
+            with self.auto_populating(True):
+
+                for attribute in item.attributes:
+                    value = ftrack_api.symbol.NOT_SET
+
+                    if entity_attribute_strategy == 'all':
+                        value = attribute.get_value(item)
+
+                    elif entity_attribute_strategy == 'set_only':
+                        if attribute.is_set(item):
+                            value = attribute.get_local_value(item)
+                            if value is ftrack_api.symbol.NOT_SET:
+                                value = attribute.get_remote_value(item)
+
+                    elif entity_attribute_strategy == 'modified_only':
+                        if attribute.is_modified(item):
+                            value = attribute.get_local_value(item)
+
+                    elif entity_attribute_strategy == 'persisted_only':
+                        if not attribute.computed:
+                            value = attribute.get_remote_value(item)
+
+                    if value is not ftrack_api.symbol.NOT_SET:
+                        if isinstance(
+                            attribute, ftrack_api.attribute.ReferenceAttribute
+                        ):
+                            if isinstance(value, ftrack_api.entity.base.Entity):
+                                value = self.entity_reference(value)
+
+                        data[attribute.name] = value
+
+            return data
+
+        if isinstance(
+            item, ftrack_api.collection.MappedCollectionProxy
+        ):
+            # Use proxied collection for serialisation.
+            item = item.collection
+
+        if isinstance(item, ftrack_api.collection.Collection):
+            data = []
+            for entity in item:
+                data.append(self.entity_reference(entity))
+
+            return data
+
+        raise TypeError('{0!r} is not JSON serializable'.format(item))
+
+    def entity_reference(self, entity):
+        '''Return entity reference that uniquely identifies *entity*.
+
+        Return a mapping containing the __entity_type__ of the entity along
+        with the key, value pairs that make up its primary key.
+
+        '''
+        reference = {
+            '__entity_type__': entity.entity_type
+        }
+        with self.auto_populating(False):
+            reference.update(ftrack_api.inspection.primary_key(entity))
+
+        return reference
+
+    def decode(self, string):
+        '''Return decoded JSON *string* as Python object.'''
+        with self.operation_recording(False):
+            return json.loads(string, object_hook=self._decode)
+
+    def _decode(self, item):
+        '''Return *item* transformed into appropriate representation.'''
+        if isinstance(item, collections.Mapping):
+            if '__type__' in item:
+                if item['__type__'] == 'datetime':
+                    item = arrow.get(item['value'])
+
+            elif '__entity_type__' in item:
+                item = self._create(
+                    item['__entity_type__'], item, reconstructing=True
+                )
+
+        return item
+
+    def _get_locations(self, filter_inaccessible=True):
+        '''Helper to return locations ordered by priority.
+
+        If *filter_inaccessible* is True then only accessible locations will
+        be included in result.
+
+        '''
+        # Optimise this call.
+        locations = self.query('Location')
+
+        # Filter.
+        if filter_inaccessible:
+            locations = [
+                location for location in locations if location.accessor
+            ]
+
+        # Sort by priority.
+        locations = sorted(
+            locations, key=lambda location: location.priority
+        )
+
+        return locations
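For reference, :meth:`entity_reference` above is what keeps wire payloads small when entities appear inside other entities' data. An illustrative sketch; the id shown is fabricated::

    task = session.query('Task').first()
    reference = session.entity_reference(task)
    # {'__entity_type__': 'Task', 'id': '423f23cc-...'}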
+
+    def pick_location(self, component=None):
+        '''Return suitable location to use.
+
+        If no *component* specified then return highest priority accessible
+        location. Otherwise, return highest priority accessible location that
+        *component* is available in.
+
+        Return None if no suitable location could be picked.
+
+        '''
+        if component:
+            return self.pick_locations([component])[0]
+
+        else:
+            locations = self._get_locations()
+            if locations:
+                return locations[0]
+            else:
+                return None
+
+    def pick_locations(self, components):
+        '''Return suitable locations for *components*.
+
+        Return list of locations corresponding to *components* where each
+        picked location is the highest priority accessible location for that
+        component. If a component has no location available then its
+        corresponding entry will be None.
+
+        '''
+        candidate_locations = self._get_locations()
+        availabilities = self.get_component_availabilities(
+            components, locations=candidate_locations
+        )
+
+        locations = []
+        for component, availability in zip(components, availabilities):
+            location = None
+
+            for candidate_location in candidate_locations:
+                if availability.get(candidate_location['id']) > 0.0:
+                    location = candidate_location
+                    break
+
+            locations.append(location)
+
+        return locations
+
+    def create_component(
+        self, path, data=None, location='auto'
+    ):
+        '''Create a new component from *path* with additional *data*.
+
+        .. note::
+
+            This is a helper method. To create components manually use the
+            standard :meth:`Session.create` method.
+
+        *path* can be a string representing a filesystem path to the data to
+        use for the component. The *path* can also be specified as a sequence
+        string, in which case a sequence component with child components for
+        each item in the sequence will be created automatically. The accepted
+        format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
+        example::
+
+            '/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
+
+        .. seealso::
+
+            `Clique documentation `_
+
+        *data* should be a dictionary of any additional data to construct the
+        component with (as passed to :meth:`Session.create`).
+
+        If *location* is specified then automatically add component to that
+        location. The default of 'auto' will automatically pick a suitable
+        location to add the component to if one is available. To not add to
+        any location, specify *location* as None.
+
+        .. note::
+
+            A :meth:`Session.commit` may be automatically issued as part of
+            the component's registration in the location.
+
+        '''
+        if data is None:
+            data = {}
+
+        if location == 'auto':
+            # Check if the component name matches one of the ftrackreview
+            # specific names. Add the component to the ftrack.review location
+            # if so. This is used to not break backwards compatibility.
+            if data.get('name') in (
+                'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
+            ):
+                location = self.get(
+                    'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
+                )
+
+            else:
+                location = self.pick_location()
+
+        try:
+            collection = clique.parse(path)
+
+        except ValueError:
+            # Assume it is a single file.
+            if 'size' not in data:
+                data['size'] = self._get_filesystem_size(path)
+
+            file_type = self.event_hub.publish(
+                ftrack_api.event.base.Event(
+                    topic='ftrack.api.session.get-file-type-from-string',
+                    data=dict(
+                        file_path=path
+                    )
+                ),
+                synchronous=True
+            )
+
+            # Pick the first valid result or None.
+            file_type = next((result for result in file_type if result), None)
+            if not file_type:
+                file_type = os.path.splitext(path)[-1]
+
+            data.setdefault('file_type', file_type)
+
+            return self._create_component(
+                'FileComponent', path, data, location
+            )
+
+        else:
+            # Calculate size of container and members.
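A usage note for :meth:`create_component`, whose sequence branch continues below: a hedged sketch with illustrative paths::

    # Single file: file_type and size are derived automatically.
    component = session.create_component(
        '/path/to/plate.mov', data={'name': 'main'}, location='auto'
    )

    # Sequence: creates a SequenceComponent with a FileComponent per frame.
    sequence = session.create_component(
        '/path/to/plate.%04d.exr [1-24]', data={'name': 'plate'}
    )
    session.commit()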
+ member_sizes = {} + container_size = data.get('size') + + if container_size is not None: + if len(collection.indexes) > 0: + member_size = int( + round(container_size / len(collection.indexes)) + ) + for item in collection: + member_sizes[item] = member_size + + else: + container_size = 0 + for item in collection: + member_sizes[item] = self._get_filesystem_size(item) + container_size += member_sizes[item] + + # Create sequence component + container_path = collection.format('{head}{padding}{tail}') + data.setdefault('padding', collection.padding) + data.setdefault('file_type', os.path.splitext(container_path)[-1]) + data.setdefault('size', container_size) + + container = self._create_component( + 'SequenceComponent', container_path, data, location=None + ) + + # Create member components for sequence. + for member_path in collection: + member_data = { + 'name': collection.match(member_path).group('index'), + 'container': container, + 'size': member_sizes[member_path], + 'file_type': os.path.splitext(member_path)[-1] + } + + component = self._create_component( + 'FileComponent', member_path, member_data, location=None + ) + container['members'].append(component) + + if location: + origin_location = self.get( + 'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID + ) + location.add_component( + container, origin_location, recursive=True + ) + + return container + + def _create_component(self, entity_type, path, data, location): + '''Create and return component. + + See public function :py:func:`createComponent` for argument details. + + ''' + component = self.create(entity_type, data) + + # Add to special origin location so that it is possible to add to other + # locations. + origin_location = self.get( + 'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID + ) + origin_location.add_component(component, path, recursive=False) + + if location: + location.add_component(component, origin_location, recursive=False) + + return component + + def _get_filesystem_size(self, path): + '''Return size from *path*''' + try: + size = os.path.getsize(path) + except OSError: + size = 0 + + return size + + def get_component_availability(self, component, locations=None): + '''Return availability of *component*. + + If *locations* is set then limit result to availability of *component* + in those *locations*. + + Return a dictionary of {location_id:percentage_availability} + + ''' + return self.get_component_availabilities( + [component], locations=locations + )[0] + + def get_component_availabilities(self, components, locations=None): + '''Return availabilities of *components*. + + If *locations* is set then limit result to availabilities of + *components* in those *locations*. + + Return a list of dictionaries of {location_id:percentage_availability}. + The list indexes correspond to those of *components*. + + ''' + availabilities = [] + + if locations is None: + locations = self.query('Location') + + # Separate components into two lists, those that are containers and + # those that are not, so that queries can be optimised. + standard_components = [] + container_components = [] + + for component in components: + if 'members' in list(component.keys()): + container_components.append(component) + else: + standard_components.append(component) + + # Perform queries. 
+ if standard_components: + self.populate( + standard_components, 'component_locations.location_id' + ) + + if container_components: + self.populate( + container_components, + 'members, component_locations.location_id' + ) + + base_availability = {} + for location in locations: + base_availability[location['id']] = 0.0 + + for component in components: + availability = base_availability.copy() + availabilities.append(availability) + + is_container = 'members' in list(component.keys()) + if is_container and len(component['members']): + member_availabilities = self.get_component_availabilities( + component['members'], locations=locations + ) + multiplier = 1.0 / len(component['members']) + for member, member_availability in zip( + component['members'], member_availabilities + ): + for location_id, ratio in list(member_availability.items()): + availability[location_id] += ( + ratio * multiplier + ) + else: + for component_location in component['component_locations']: + location_id = component_location['location_id'] + if location_id in availability: + availability[location_id] = 100.0 + + for location_id, percentage in list(availability.items()): + # Avoid quantization error by rounding percentage and clamping + # to range 0-100. + adjusted_percentage = round(percentage, 9) + adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0)) + availability[location_id] = adjusted_percentage + + return availabilities + + def get_widget_url(self, name, entity=None, theme=None): + '''Return an authenticated URL for widget with *name* and given options. + + The returned URL will be authenticated using a token which will expire + after 6 minutes. + + *name* should be the name of the widget to return and should be one of + 'info', 'tasks' or 'tasks_browser'. + + Certain widgets require an entity to be specified. If so, specify it by + setting *entity* to a valid entity instance. + + *theme* sets the theme of the widget and can be either 'light' or 'dark' + (defaulting to 'dark' if an invalid option given). + + ''' + operation = { + 'action': 'get_widget_url', + 'name': name, + 'theme': theme + } + if entity: + operation['entity_type'] = entity.entity_type + operation['entity_key'] = ( + list(ftrack_api.inspection.primary_key(entity).values()) + ) + + try: + result = self.call([operation]) + + except ftrack_api.exception.ServerError as error: + # Raise informative error if the action is not supported. + if 'Invalid action u\'get_widget_url\'' in error.message: + raise ftrack_api.exception.ServerCompatibilityError( + 'Server version {0!r} does not support "get_widget_url", ' + 'please update server and try again.'.format( + self.server_information.get('version') + ) + ) + else: + raise + + else: + return result[0]['widget_url'] + + def encode_media(self, media, version_id=None, keep_original='auto'): + '''Return a new Job that encode *media* to make it playable in browsers. + + *media* can be a path to a file or a FileComponent in the ftrack.server + location. + + The job will encode *media* based on the file type and job data contains + information about encoding in the following format:: + + { + 'output': [{ + 'format': 'video/mp4', + 'component_id': 'e2dc0524-b576-11d3-9612-080027331d74' + }, { + 'format': 'image/jpeg', + 'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b' + }], + 'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294', + 'keep_original': True + } + + The output components are associated with the job via the job_components + relation. 
+
+        An image component that can be used as a thumbnail will always be
+        generated if possible.
+
+        If *media* is a file path, a new source component will be created and
+        added to the ftrack server location and a call to :meth:`commit` will
+        be issued. If *media* is a FileComponent, it will be assumed to be
+        available in the ftrack.server location.
+
+        If *version_id* is specified, the new components will automatically be
+        associated with the AssetVersion. Otherwise, the components will not
+        be associated to a version even if the supplied *media* belongs to one.
+        A server version of 3.3.32 or higher is required for the version_id
+        argument to function properly.
+
+        If *keep_original* is not set, the original media will be kept if it
+        is a FileComponent, and deleted if it is a file path. You can specify
+        True or False to change this behavior.
+
+        '''
+        if isinstance(media, string_types):
+            # Media is a path to a file.
+            server_location = self.get(
+                'Location', ftrack_api.symbol.SERVER_LOCATION_ID
+            )
+            if keep_original == 'auto':
+                keep_original = False
+
+            component_data = None
+            if keep_original:
+                component_data = dict(version_id=version_id)
+
+            component = self.create_component(
+                path=media,
+                data=component_data,
+                location=server_location
+            )
+
+            # Auto commit to ensure component exists when sent to server.
+            self.commit()
+
+        elif (
+            hasattr(media, 'entity_type') and
+            media.entity_type in ('FileComponent',)
+        ):
+            # Existing file component.
+            component = media
+            if keep_original == 'auto':
+                keep_original = True
+
+        else:
+            raise ValueError(
+                'Unable to encode media of type: {0}'.format(type(media))
+            )
+
+        operation = {
+            'action': 'encode_media',
+            'component_id': component['id'],
+            'version_id': version_id,
+            'keep_original': keep_original
+        }
+
+        try:
+            result = self.call([operation])
+
+        except ftrack_api.exception.ServerError as error:
+            # Raise informative error if the action is not supported.
+            if 'Invalid action u\'encode_media\'' in error.message:
+                raise ftrack_api.exception.ServerCompatibilityError(
+                    'Server version {0!r} does not support "encode_media", '
+                    'please update server and try again.'.format(
+                        self.server_information.get('version')
+                    )
+                )
+            else:
+                raise
+
+        return self.get('Job', result[0]['job_id'])
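A usage note for :meth:`encode_media` above: a sketch with an illustrative path. The job's 'data' attribute holds the JSON structure described in the docstring::

    import json

    job = session.encode_media('/path/to/preview.mov')
    job_data = json.loads(job['data'])

    for output in job_data['output']:
        print(output['format'], output['component_id'])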
+
+    def get_upload_metadata(
+        self, component_id, file_name, file_size, checksum=None
+    ):
+        '''Return URL and headers used to upload data for *component_id*.
+
+        *file_name* and *file_size* should match the component's details.
+
+        The returned URL should be requested using HTTP PUT with the specified
+        headers.
+
+        The *checksum* is used as the Content-MD5 header and should contain
+        the base64-encoded 128-bit MD5 digest of the message (without the
+        headers) according to RFC 1864. This can be used as a message integrity
+        check to verify that the data is the same data that was originally
+        sent.
+
+        '''
+        operation = {
+            'action': 'get_upload_metadata',
+            'component_id': component_id,
+            'file_name': file_name,
+            'file_size': file_size,
+            'checksum': checksum
+        }
+
+        try:
+            result = self.call([operation])
+
+        except ftrack_api.exception.ServerError as error:
+            # Raise informative error if the action is not supported.
+            if 'Invalid action u\'get_upload_metadata\'' in error.message:
+                raise ftrack_api.exception.ServerCompatibilityError(
+                    'Server version {0!r} does not support '
+                    '"get_upload_metadata", please update server and try '
+                    'again.'.format(
+                        self.server_information.get('version')
+                    )
+                )
+            else:
+                raise
+
+        return result[0]
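The mapping returned by :meth:`get_upload_metadata` above is designed to be fed straight into an HTTP PUT. A hedged sketch using the third-party requests library; the 'url' and 'headers' result keys are an assumption based on the docstring::

    import os
    import requests

    path = '/path/to/file.mov'
    metadata = session.get_upload_metadata(
        component_id=component['id'],  # Hypothetical existing component.
        file_name=os.path.basename(path),
        file_size=os.path.getsize(path)
    )

    with open(path, 'rb') as data:
        response = requests.put(
            metadata['url'], data=data, headers=metadata['headers']
        )
        response.raise_for_status()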
+
+    def send_user_invite(self, user):
+        '''Send an invitation to the provided *user*.
+
+        *user* is a User instance.
+
+        '''
+
+        self.send_user_invites(
+            [user]
+        )
+
+    def send_user_invites(self, users):
+        '''Send an invitation to each of the provided *users*.
+
+        *users* is a list of User instances.
+
+        '''
+
+        operations = []
+
+        for user in users:
+            operations.append(
+                {
+                    'action': 'send_user_invite',
+                    'user_id': user['id']
+                }
+            )
+
+        try:
+            self.call(operations)
+
+        except ftrack_api.exception.ServerError as error:
+            # Raise informative error if the action is not supported.
+            if 'Invalid action u\'send_user_invite\'' in error.message:
+                raise ftrack_api.exception.ServerCompatibilityError(
+                    'Server version {0!r} does not support '
+                    '"send_user_invite", please update server and '
+                    'try again.'.format(
+                        self.server_information.get('version')
+                    )
+                )
+            else:
+                raise
+
+    def send_review_session_invite(self, invitee):
+        '''Send an invite to a review session to *invitee*.
+
+        *invitee* is an instance of ReviewSessionInvitee.
+
+        .. note::
+
+            The *invitee* must be committed.
+
+        '''
+        self.send_review_session_invites([invitee])
+
+    def send_review_session_invites(self, invitees):
+        '''Send an invite to a review session to a list of *invitees*.
+
+        *invitees* is a list of ReviewSessionInvitee objects.
+
+        .. note::
+
+            All *invitees* must be committed.
+
+        '''
+        operations = []
+
+        for invitee in invitees:
+            operations.append(
+                {
+                    'action': 'send_review_session_invite',
+                    'review_session_invitee_id': invitee['id']
+                }
+            )
+
+        try:
+            self.call(operations)
+        except ftrack_api.exception.ServerError as error:
+            # Raise informative error if the action is not supported.
+            if 'Invalid action u\'send_review_session_invite\'' in error.message:
+                raise ftrack_api.exception.ServerCompatibilityError(
+                    'Server version {0!r} does not support '
+                    '"send_review_session_invite", please update server and '
+                    'try again.'.format(
+                        self.server_information.get('version')
+                    )
+                )
+            else:
+                raise
+
+
+class AutoPopulatingContext(object):
+    '''Context manager for temporarily changing session auto_populate value.'''
+
+    def __init__(self, session, auto_populate):
+        '''Initialise context.'''
+        super(AutoPopulatingContext, self).__init__()
+        self._session = session
+        self._auto_populate = auto_populate
+        self._current_auto_populate = None
+
+    def __enter__(self):
+        '''Enter context switching to desired auto populate setting.'''
+        self._current_auto_populate = self._session.auto_populate
+        self._session.auto_populate = self._auto_populate
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        '''Exit context resetting auto populate to original setting.'''
+        self._session.auto_populate = self._current_auto_populate
+
+
+class OperationRecordingContext(object):
+    '''Context manager for temporarily changing session record_operations.'''
+
+    def __init__(self, session, record_operations):
+        '''Initialise context.'''
+        super(OperationRecordingContext, self).__init__()
+        self._session = session
+        self._record_operations = record_operations
+        self._current_record_operations = None
+
+    def __enter__(self):
+        '''Enter context.'''
+        self._current_record_operations = self._session.record_operations
+        self._session.record_operations = self._record_operations
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        '''Exit context.'''
+        self._session.record_operations = self._current_record_operations
+
+
+class OperationPayload(collections.MutableMapping):
+    '''Represent operation payload.'''
+
+    def __init__(self, *args, **kwargs):
+        '''Initialise payload.'''
+        super(OperationPayload, self).__init__()
+        self._data = dict()
+        self.update(dict(*args, **kwargs))
+
+    def __str__(self):
+        '''Return string representation.'''
+        return '<{0} {1}>'.format(
+            self.__class__.__name__, str(self._data)
+        )
+
+    def __getitem__(self, key):
+        '''Return value for *key*.'''
+        return self._data[key]
+
+    def __setitem__(self, key, value):
+        '''Set *value* for *key*.'''
+        self._data[key] = value
+
+    def __delitem__(self, key):
+        '''Remove *key*.'''
+        del self._data[key]
+
+    def __iter__(self):
+        '''Iterate over all keys.'''
+        return iter(self._data)
+
+    def __len__(self):
+        '''Return count of keys.'''
+        return len(self._data)
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/__init__.py
new file mode 100644
index 0000000000..1aab07ed77
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/__init__.py
@@ -0,0 +1,2 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/base.py
new file mode 100644
index 0000000000..de3335f9f4
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/base.py
@@ -0,0 +1,38 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+from builtins import object
+from abc import ABCMeta, abstractmethod
+from future.utils import with_metaclass
+
+
+class Structure(with_metaclass(ABCMeta, object)):
+    '''Structure plugin interface.
+ + A structure plugin should compute appropriate paths for data. + + ''' + + def __init__(self, prefix=''): + '''Initialise structure.''' + self.prefix = prefix + self.path_separator = '/' + super(Structure, self).__init__() + + @abstractmethod + def get_resource_identifier(self, entity, context=None): + '''Return a resource identifier for supplied *entity*. + + *context* can be a mapping that supplies additional information. + + ''' + + def _get_sequence_expression(self, sequence): + '''Return a sequence expression for *sequence* component.''' + padding = sequence['padding'] + if padding: + expression = '%0{0}d'.format(padding) + else: + expression = '%d' + + return expression diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/entity_id.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/entity_id.py new file mode 100644 index 0000000000..ae466bf6d9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/entity_id.py @@ -0,0 +1,12 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +import ftrack_api.structure.base + + +class EntityIdStructure(ftrack_api.structure.base.Structure): + '''Entity id pass-through structure.''' + + def get_resource_identifier(self, entity, context=None): + '''Return a *resourceIdentifier* for supplied *entity*.''' + return entity['id'] diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/id.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/id.py new file mode 100644 index 0000000000..acc3e21b02 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/id.py @@ -0,0 +1,91 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +import os + +import ftrack_api.symbol +import ftrack_api.structure.base + + +class IdStructure(ftrack_api.structure.base.Structure): + '''Id based structure supporting Components only. + + A components unique id will be used to form a path to store the data at. + To avoid millions of entries in one directory each id is chunked into four + prefix directories with the remainder used to name the file:: + + /prefix/1/2/3/4/56789 + + If the component has a defined filetype it will be added to the path:: + + /prefix/1/2/3/4/56789.exr + + Components that are children of container components will be placed inside + the id structure of their parent:: + + /prefix/1/2/3/4/56789/355827648d.exr + /prefix/1/2/3/4/56789/ajf24215b5.exr + + However, sequence children will be named using their label as an index and + a common prefix of 'file.':: + + /prefix/1/2/3/4/56789/file.0001.exr + /prefix/1/2/3/4/56789/file.0002.exr + + ''' + + def get_resource_identifier(self, entity, context=None): + '''Return a resource identifier for supplied *entity*. + + *context* can be a mapping that supplies additional information. + + ''' + if entity.entity_type in ('FileComponent',): + # When in a container, place the file inside a directory named + # after the container. + container = entity['container'] + if container and container is not ftrack_api.symbol.NOT_SET: + path = self.get_resource_identifier(container) + + if container.entity_type in ('SequenceComponent',): + # Label doubles as index for now. 
+ name = 'file.{0}{1}'.format( + entity['name'], entity['file_type'] + ) + parts = [os.path.dirname(path), name] + + else: + # Just place uniquely identified file into directory + name = entity['id'] + entity['file_type'] + parts = [path, name] + + else: + name = entity['id'][4:] + entity['file_type'] + parts = ([self.prefix] + list(entity['id'][:4]) + [name]) + + elif entity.entity_type in ('SequenceComponent',): + name = 'file' + + # Add a sequence identifier. + sequence_expression = self._get_sequence_expression(entity) + name += '.{0}'.format(sequence_expression) + + if ( + entity['file_type'] and + entity['file_type'] is not ftrack_api.symbol.NOT_SET + ): + name += entity['file_type'] + + parts = ([self.prefix] + list(entity['id'][:4]) + + [entity['id'][4:]] + [name]) + + elif entity.entity_type in ('ContainerComponent',): + # Just an id directory + parts = ([self.prefix] + + list(entity['id'][:4]) + [entity['id'][4:]]) + + else: + raise NotImplementedError('Cannot generate path for unsupported ' + 'entity {0}'.format(entity)) + + return self.path_separator.join(parts).strip('/') diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/origin.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/origin.py new file mode 100644 index 0000000000..0d4d3a57f5 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/origin.py @@ -0,0 +1,28 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2014 ftrack + +from .base import Structure + + +class OriginStructure(Structure): + '''Origin structure that passes through existing resource identifier.''' + + def get_resource_identifier(self, entity, context=None): + '''Return a resource identifier for supplied *entity*. + + *context* should be a mapping that includes at least a + 'source_resource_identifier' key that refers to the resource identifier + to pass through. + + ''' + if context is None: + context = {} + + resource_identifier = context.get('source_resource_identifier') + if resource_identifier is None: + raise ValueError( + 'Could not generate resource identifier as no source resource ' + 'identifier found in passed context.' + ) + + return resource_identifier diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/structure/standard.py b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/standard.py new file mode 100644 index 0000000000..f1833f3f16 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/ftrack_api/structure/standard.py @@ -0,0 +1,215 @@ +# :coding: utf-8 +# :copyright: Copyright (c) 2015 ftrack + +from builtins import str +import os +import re +import unicodedata + +import ftrack_api.symbol +import ftrack_api.structure.base + + +class StandardStructure(ftrack_api.structure.base.Structure): + '''Project hierarchy based structure that only supports Components. + + The resource identifier is generated from the project code, the name + of objects in the project structure, asset name and version number:: + + my_project/folder_a/folder_b/asset_name/v003 + + If the component is a `FileComponent` then the name of the component and the + file type are used as filename in the resource_identifier:: + + my_project/folder_a/folder_b/asset_name/v003/foo.jpg + + If the component is a `SequenceComponent` then a sequence expression, + `%04d`, is used. E.g. 
a component with the name `foo` yields:: + + my_project/folder_a/folder_b/asset_name/v003/foo.%04d.jpg + + For the member components their index in the sequence is used:: + + my_project/folder_a/folder_b/asset_name/v003/foo.0042.jpg + + The name of the component is added to the resource identifier if the + component is a `ContainerComponent`. E.g. a container component with the + name `bar` yields:: + + my_project/folder_a/folder_b/asset_name/v003/bar + + For a member of that container the file name is based on the component name + and file type:: + + my_project/folder_a/folder_b/asset_name/v003/bar/baz.pdf + + ''' + + def __init__( + self, project_versions_prefix=None, illegal_character_substitute='_' + ): + '''Initialise structure. + + If *project_versions_prefix* is defined, insert after the project code + for versions published directly under the project:: + + my_project//v001/foo.jpg + + Replace illegal characters with *illegal_character_substitute* if + defined. + + .. note:: + + Nested component containers/sequences are not supported. + + ''' + super(StandardStructure, self).__init__() + self.project_versions_prefix = project_versions_prefix + self.illegal_character_substitute = illegal_character_substitute + + def _get_parts(self, entity): + '''Return resource identifier parts from *entity*.''' + session = entity.session + + version = entity['version'] + + if version is ftrack_api.symbol.NOT_SET and entity['version_id']: + version = session.get('AssetVersion', entity['version_id']) + + error_message = ( + 'Component {0!r} must be attached to a committed ' + 'version and a committed asset with a parent context.'.format( + entity + ) + ) + + if ( + version is ftrack_api.symbol.NOT_SET or + version in session.created + ): + raise ftrack_api.exception.StructureError(error_message) + + link = version['link'] + + if not link: + raise ftrack_api.exception.StructureError(error_message) + + structure_names = [ + item['name'] + for item in link[1:-1] + ] + + project_id = link[0]['id'] + project = session.get('Project', project_id) + asset = version['asset'] + + version_number = self._format_version(version['version']) + + parts = [] + parts.append(project['name']) + + if structure_names: + parts.extend(structure_names) + elif self.project_versions_prefix: + # Add *project_versions_prefix* if configured and the version is + # published directly under the project. + parts.append(self.project_versions_prefix) + + parts.append(asset['name']) + parts.append(version_number) + + return [self.sanitise_for_filesystem(part) for part in parts] + + def _format_version(self, number): + '''Return a formatted string representing version *number*.''' + return 'v{0:03d}'.format(number) + + def sanitise_for_filesystem(self, value): + '''Return *value* with illegal filesystem characters replaced. + + An illegal character is one that is not typically valid for filesystem + usage, such as non ascii characters, or can be awkward to use in a + filesystem, such as spaces. Replace these characters with + the character specified by *illegal_character_substitute* on + initialisation. If no character was specified as substitute then return + *value* unmodified. 
+
+        '''
+        if self.illegal_character_substitute is None:
+            return value
+
+        value = unicodedata.normalize('NFKD', str(value)).encode('ascii', 'ignore')
+        value = re.sub('[^\w\.-]', self.illegal_character_substitute, value.decode('utf-8'))
+        return str(value.strip().lower())
+
+    def get_resource_identifier(self, entity, context=None):
+        '''Return a resource identifier for supplied *entity*.
+
+        *context* can be a mapping that supplies additional information, but
+        is unused in this implementation.
+
+        Raise a :py:exc:`ftrack_api.exception.StructureError` if *entity* is not
+        attached to a committed version and a committed asset with a parent
+        context.
+
+        '''
+        if entity.entity_type in ('FileComponent',):
+            container = entity['container']
+
+            if container:
+                # Get resource identifier for container.
+                container_path = self.get_resource_identifier(container)
+
+                if container.entity_type in ('SequenceComponent',):
+                    # Strip the sequence component expression from the parent
+                    # container and add back the correct filename, i.e.
+                    # /sequence/component/sequence_component_name.0012.exr.
+                    name = '{0}.{1}{2}'.format(
+                        container['name'], entity['name'], entity['file_type']
+                    )
+                    parts = [
+                        os.path.dirname(container_path),
+                        self.sanitise_for_filesystem(name)
+                    ]
+
+                else:
+                    # Container is not a sequence component so add it as a
+                    # normal component inside the container.
+                    name = entity['name'] + entity['file_type']
+                    parts = [
+                        container_path, self.sanitise_for_filesystem(name)
+                    ]
+
+            else:
+                # File component does not have a container, construct name from
+                # component name and file type.
+                parts = self._get_parts(entity)
+                name = entity['name'] + entity['file_type']
+                parts.append(self.sanitise_for_filesystem(name))
+
+        elif entity.entity_type in ('SequenceComponent',):
+            # Create sequence expression for the sequence component and add it
+            # to the parts.
+            parts = self._get_parts(entity)
+            sequence_expression = self._get_sequence_expression(entity)
+            parts.append(
+                '{0}.{1}{2}'.format(
+                    self.sanitise_for_filesystem(entity['name']),
+                    sequence_expression,
+                    self.sanitise_for_filesystem(entity['file_type'])
+                )
+            )
+
+        elif entity.entity_type in ('ContainerComponent',):
+            # Add the name of the container to the resource identifier parts.
+            parts = self._get_parts(entity)
+            parts.append(self.sanitise_for_filesystem(entity['name']))
+
+        else:
+            raise NotImplementedError(
+                'Cannot generate resource identifier for unsupported '
+                'entity {0!r}'.format(entity)
+            )
+
+        return self.path_separator.join(parts)
diff --git a/pype/modules/ftrack/python2_vendor/ftrack_api/symbol.py b/pype/modules/ftrack/python2_vendor/ftrack_api/symbol.py
new file mode 100644
index 0000000000..4906c4e792
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/ftrack_api/symbol.py
@@ -0,0 +1,78 @@
+# :coding: utf-8
+# :copyright: Copyright (c) 2014 ftrack
+
+import os
+
+from builtins import object
+
+
+class Symbol(object):
+    '''A constant symbol.'''
+
+    def __init__(self, name, value=True):
+        '''Initialise symbol with unique *name* and *value*.
+
+        *value* is used for nonzero testing.
+
+        '''
+        self.name = name
+        self.value = value
+
+    def __str__(self):
+        '''Return string representation.'''
+        return self.name
+
+    def __repr__(self):
+        '''Return representation.'''
+        return '{0}({1})'.format(self.__class__.__name__, self.name)
+
+    def __bool__(self):
+        '''Return whether symbol represents non-zero value.'''
+        return bool(self.value)
+
+    def __copy__(self):
+        '''Return shallow copy.
+
+        Overridden to always return same instance.
+ + ''' + return self + + +#: Symbol representing that no value has been set or loaded. +NOT_SET = Symbol('NOT_SET', False) + +#: Symbol representing created state. +CREATED = Symbol('CREATED') + +#: Symbol representing modified state. +MODIFIED = Symbol('MODIFIED') + +#: Symbol representing deleted state. +DELETED = Symbol('DELETED') + +#: Topic published when component added to a location. +COMPONENT_ADDED_TO_LOCATION_TOPIC = 'ftrack.location.component-added' + +#: Topic published when component removed from a location. +COMPONENT_REMOVED_FROM_LOCATION_TOPIC = 'ftrack.location.component-removed' + +#: Identifier of builtin origin location. +ORIGIN_LOCATION_ID = 'ce9b348f-8809-11e3-821c-20c9d081909b' + +#: Identifier of builtin unmanaged location. +UNMANAGED_LOCATION_ID = 'cb268ecc-8809-11e3-a7e2-20c9d081909b' + +#: Identifier of builtin review location. +REVIEW_LOCATION_ID = 'cd41be70-8809-11e3-b98a-20c9d081909b' + +#: Identifier of builtin connect location. +CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b' + +#: Identifier of builtin server location. +SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b' + +#: Chunk size used when working with data, default to 1Mb. +CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024 + +#: Symbol representing syncing users with ldap +JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP')
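A brief note on how these symbols behave in practice: NOT_SET is falsy, but identity comparison is the unambiguous check::

    import ftrack_api.symbol

    value = task['description']  # Hypothetical entity from a session.
    if value is ftrack_api.symbol.NOT_SET:
        print('description has not been set or loaded')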