Merge remote-tracking branch 'origin/ftrack-all-jakub' into sync-avalon
# Conflicts:
#   pype/plugins/global/publish/collect_deadline_user.py
#   pype/plugins/global/publish/submit_publish_job.py

fixed conflicts
commit dbe6203318
420 changed files with 108195 additions and 499 deletions
93 .gitignore vendored
|
|
@ -4,96 +4,3 @@
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# Pycharm IDE settings
.idea
|
||||
|
|
|
|||
|
|
@ -30,6 +30,11 @@ def install():
|
|||
avalon.data["familiesStateDefault"] = False
|
||||
avalon.data["familiesStateToggled"] = family_states
|
||||
|
||||
# # work files start at app start
|
||||
# workfiles.show(
|
||||
# os.environ["AVALON_WORKDIR"]
|
||||
# )
|
||||
|
||||
|
||||
def uninstall():
|
||||
print("Deregistering Nuke plug-ins..")
|
||||
|
|
|
|||
|
|
@ -3,46 +3,40 @@ import sys
|
|||
from avalon.vendor.Qt import QtGui
|
||||
import avalon.nuke
|
||||
|
||||
import nuke
|
||||
|
||||
self = sys.modules[__name__]
|
||||
self._project = None
|
||||
|
||||
|
||||
def update_frame_range(start, end, root=None, set_render_range=True):
|
||||
"""Set Fusion comp's start and end frame range
|
||||
def update_frame_range(start, end, root=None):
|
||||
"""Set Nuke script start and end frame range
|
||||
|
||||
Args:
|
||||
start (float, int): start frame
|
||||
end (float, int): end frame
|
||||
comp (object, Optional): comp object from fusion
|
||||
set_render_range (bool, Optional): When True this will also set the
|
||||
composition's render start and end frame.
|
||||
root (object, Optional): root object from nuke's script
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
if not root:
|
||||
root, nodes = avalon.nuke.get_current_comp()
|
||||
|
||||
knobs = {
|
||||
"COMPN_GlobalStart": start,
|
||||
"COMPN_GlobalEnd": end
|
||||
"first_frame": start,
|
||||
"last_frame": end
|
||||
}
|
||||
|
||||
if set_render_range:
|
||||
knobs.update({
|
||||
"COMPN_RenderStart": start,
|
||||
"COMPN_RenderEnd": end
|
||||
})
|
||||
|
||||
with avalon.nuke.comp_lock_and_undo_chunk():
|
||||
comp.SetAttrs(attrs)
|
||||
with avalon.nuke.viewer_update_and_undo_stop():
|
||||
for key, value in knobs.items():
|
||||
if root:
|
||||
root[key].setValue(value)
|
||||
else:
|
||||
nuke.root()[key].setValue(value)
|
||||
|
||||
|
||||
def get_additional_data(container):
|
||||
"""Get Fusion related data for the container
|
||||
"""Get Nuke's related data for the container
|
||||
|
||||
Args:
|
||||
container(dict): the container found by the ls() function
|
||||
|
|
@ -51,11 +45,16 @@ def get_additional_data(container):
|
|||
dict
|
||||
"""
|
||||
|
||||
tool = container["_tool"]
|
||||
tile_color = tool.TileColor
|
||||
node = container["_tool"]
|
||||
tile_color = node['tile_color'].value()
|
||||
if tile_color is None:
|
||||
return {}
|
||||
|
||||
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
|
||||
tile_color["G"],
|
||||
tile_color["B"])}
|
||||
hex = '%08x' % tile_color
|
||||
rgba = [
|
||||
float(int(hex[0:2], 16)) / 255.0,
|
||||
float(int(hex[2:4], 16)) / 255.0,
|
||||
float(int(hex[4:6], 16)) / 255.0
|
||||
]
|
||||
|
||||
return {"color": QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
|
||||
|
|
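For reference, a minimal usage sketch of the new Nuke helpers above, assuming this module is importable as pype.nuke.lib (as the loaders later in this commit do) and that it runs inside a live Nuke session; the node name "Read1" is a placeholder:

# Hypothetical usage sketch of update_frame_range / get_additional_data.
import nuke
from pype.nuke import lib

# Set the script frame range on the root node.
lib.update_frame_range(start=1001, end=1100, root=nuke.root())

# Read the tile colour of a containerised node back as a QColor.
container = {"_tool": nuke.toNode("Read1")}  # example container dict
data = lib.get_additional_data(container)
print(data.get("color"))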
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
from avalon import api, style
|
||||
from avalon.vendor.Qt import QtGui, QtWidgets
|
||||
|
||||
import avalon.fusion
|
||||
import avalon.nuke
|
||||
|
||||
|
||||
class FusionSetToolColor(api.InventoryAction):
|
||||
class NukeSetToolColor(api.InventoryAction):
|
||||
"""Update the color of the selected tools"""
|
||||
|
||||
label = "Set Tool Color"
|
||||
|
|
@ -16,15 +16,20 @@ class FusionSetToolColor(api.InventoryAction):
|
|||
"""Color all selected tools the selected colors"""
|
||||
|
||||
result = []
|
||||
comp = avalon.fusion.get_current_comp()
|
||||
|
||||
# Get tool color
|
||||
first = containers[0]
|
||||
tool = first["_tool"]
|
||||
color = tool.TileColor
|
||||
node = first["_tool"]
|
||||
color = node["tile_color"].value()
|
||||
hex = '%08x' % color
|
||||
rgba = [
|
||||
float(int(hex[0:2], 16)) / 255.0,
|
||||
float(int(hex[2:4], 16)) / 255.0,
|
||||
float(int(hex[4:6], 16)) / 255.0
|
||||
]
|
||||
|
||||
if color is not None:
|
||||
qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
|
||||
qcolor = QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])
|
||||
else:
|
||||
qcolor = self._fallback_color
|
||||
|
||||
|
|
@ -33,15 +38,21 @@ class FusionSetToolColor(api.InventoryAction):
|
|||
if not picked_color:
|
||||
return
|
||||
|
||||
with avalon.fusion.comp_lock_and_undo_chunk(comp):
|
||||
with avalon.nuke.viewer_update_and_undo_stop():
|
||||
for container in containers:
|
||||
# Convert color to RGB 0-1 floats
|
||||
rgb_f = picked_color.getRgbF()
|
||||
rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
|
||||
|
||||
hexColour = int(
|
||||
'%02x%02x%02x%02x' % (
|
||||
rgb_f[0]*255,
|
||||
rgb_f[1]*255,
|
||||
rgb_f[2]*255,
|
||||
1),
|
||||
16
|
||||
)
|
||||
# Update tool
|
||||
tool = container["_tool"]
|
||||
tool.TileColor = rgb_f_table
|
||||
node = container["_tool"]
|
||||
node['tile_color'].setValue(hexColour)
|
||||
|
||||
result.append(container)
|
||||
|
||||
|
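As an aside, the tile_color arithmetic used above is easier to follow in isolation: Nuke stores the colour as a packed 0xRRGGBBAA integer. The helper names below are illustrative only and not part of the plugin:

# Illustrative round-trip between Nuke's packed colour int and 0-1 floats.
def tile_color_to_rgb_floats(tile_color):
    """Unpack Nuke's packed 0xRRGGBBAA integer into 0-1 RGB floats."""
    hex_str = '%08x' % tile_color
    return [int(hex_str[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]

def rgb_floats_to_tile_color(r, g, b, a=1.0):
    """Pack 0-1 RGB(A) floats back into the integer Nuke expects."""
    return int('%02x%02x%02x%02x' % (int(r * 255), int(g * 255),
                                     int(b * 255), int(a * 255)), 16)

print(tile_color_to_rgb_floats(0xff000000))           # [1.0, 0.0, 0.0] -> red
print(hex(rgb_floats_to_tile_color(1.0, 0.0, 0.0)))   # 0xff0000ff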
|
|
|||
|
|
@ -14,7 +14,7 @@ def _get_script():
|
|||
# todo: use a more elegant way to get the python script
|
||||
|
||||
try:
|
||||
from pype.scripts import publish_filesequence
|
||||
from pype.fusion.scripts import publish_filesequence
|
||||
except Exception:
|
||||
raise RuntimeError("Expected module 'publish_filesequence' "
|
||||
"to be available")
|
||||
|
|
|
|||
|
|
@ -34,8 +34,17 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):
|
|||
|
||||
order = pyblish.api.CollectorOrder + 0.499
|
||||
label = "Deadline User"
|
||||
hosts = ['maya', 'fusion', 'nuke']
families = [
"renderlayer",
"saver.deadline",
"imagesequence"
]
|
||||
|
||||
def process(self, context):
|
||||
"""Inject the current working file"""
|
||||
|
|
@ -49,4 +58,3 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):
|
|||
|
||||
self.log.info("Found Deadline user: {}".format(user))
|
||||
context.data['deadlineUser'] = user
|
||||
|
||||
|
|
|
|||
|
|
@ -88,6 +88,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
|
|||
order = pyblish.api.CollectorOrder
|
||||
targets = ["filesequence"]
|
||||
label = "File Sequences"
|
||||
hosts = ['maya']
|
||||
|
||||
def process(self, context):
|
||||
|
||||
|
|
|
|||
126 pype/plugins/global/publish/collect_json.py Normal file
|
|
@ -0,0 +1,126 @@
|
|||
import os
|
||||
import json
|
||||
import re
|
||||
|
||||
import pyblish.api
|
||||
from config.vendor import clique
|
||||
|
||||
|
||||
class CollectJSON(pyblish.api.ContextPlugin):
|
||||
""" Collecting the json files in current directory. """
|
||||
|
||||
label = "JSON"
|
||||
order = pyblish.api.CollectorOrder
|
||||
|
||||
def version_get(self, string, prefix):
|
||||
""" Extract version information from filenames. Code from Foundry"s
|
||||
nukescripts.version_get()
|
||||
"""
|
||||
|
||||
regex = r"[/_.]{}\d+".format(prefix)
|
||||
matches = re.findall(regex, string, re.IGNORECASE)
|
||||
|
||||
if not len(matches):
|
||||
msg = "No '_{}#' found in '{}'".format(prefix, string)
|
||||
raise ValueError(msg)
|
||||
return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()
|
||||
|
||||
def process(self, context):
|
||||
current_file = context.data("currentFile")
|
||||
# Skip if current file is not a directory
|
||||
if not os.path.isdir(current_file):
|
||||
return
|
||||
|
||||
# Traverse directory and collect collections from json files.
|
||||
instances = []
|
||||
for root, dirs, files in os.walk(current_file):
|
||||
for f in files:
|
||||
if f.endswith(".json"):
|
||||
with open(os.path.join(root, f)) as json_data:
|
||||
for data in json.load(json_data):
|
||||
instances.append(data)
|
||||
|
||||
# Validate instance based on supported families.
|
||||
valid_families = ["img", "cache", "scene", "mov"]
|
||||
valid_data = []
|
||||
for data in instances:
|
||||
families = data.get("families", []) + [data["family"]]
|
||||
family_type = list(set(families) & set(valid_families))
|
||||
if family_type:
|
||||
valid_data.append(data)
|
||||
|
||||
# Create existing output instance.
|
||||
scanned_dirs = []
|
||||
files = []
|
||||
collections = []
|
||||
for data in valid_data:
|
||||
if "collection" not in data.keys():
|
||||
continue
|
||||
if data["collection"] is None:
|
||||
continue
|
||||
|
||||
instance_collection = clique.parse(data["collection"])
|
||||
|
||||
try:
|
||||
version = self.version_get(
|
||||
os.path.basename(instance_collection.format()), "v"
|
||||
)[1]
|
||||
except ValueError:
|
||||
# Ignore any output that is not versioned
|
||||
continue
|
||||
|
||||
# Getting collections of all previous versions and current version
|
||||
for count in range(1, int(version) + 1):
|
||||
|
||||
# Generate collection
|
||||
version_string = "v" + str(count).zfill(len(version))
|
||||
head = instance_collection.head.replace(
|
||||
"v" + version, version_string
|
||||
)
|
||||
collection = clique.Collection(
|
||||
head=head.replace("\\", "/"),
|
||||
padding=instance_collection.padding,
|
||||
tail=instance_collection.tail
|
||||
)
|
||||
collection.version = count
|
||||
|
||||
# Scan collection directory
|
||||
scan_dir = os.path.dirname(collection.head)
|
||||
if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
|
||||
for f in os.listdir(scan_dir):
|
||||
file_path = os.path.join(scan_dir, f)
|
||||
files.append(file_path.replace("\\", "/"))
|
||||
scanned_dirs.append(scan_dir)
|
||||
|
||||
# Match files to collection and add
|
||||
for f in files:
|
||||
if collection.match(f):
|
||||
collection.add(f)
|
||||
|
||||
# Skip if no files were found in the collection
|
||||
if not list(collection):
|
||||
continue
|
||||
|
||||
# Skip existing collections
|
||||
if collection in collections:
|
||||
continue
|
||||
|
||||
instance = context.create_instance(name=data["name"])
|
||||
version = self.version_get(
|
||||
os.path.basename(collection.format()), "v"
|
||||
)[1]
|
||||
|
||||
basename = os.path.basename(collection.format())
|
||||
instance.data["label"] = "{0} - {1}".format(
|
||||
data["name"], basename
|
||||
)
|
||||
|
||||
families = data["families"] + [data["family"]]
|
||||
family = list(set(valid_families) & set(families))[0]
|
||||
instance.data["family"] = family
|
||||
instance.data["families"] = ["output"]
|
||||
instance.data["collection"] = collection
|
||||
instance.data["version"] = int(version)
|
||||
instance.data["publish"] = False
|
||||
|
||||
collections.append(collection)
|
||||
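For context, a short sketch of the clique calls the collector above relies on; clique.parse and clique.Collection come from the vendored clique package (imported as config.vendor.clique in this plugin), and the file names here are invented:

# Hypothetical example of the clique API used by CollectJSON above.
from config.vendor import clique

# A collection string as it would appear in the published json data.
collection = clique.parse("render/beauty_v003.%04d.exr [1001-1003]")

print(collection.head)     # "render/beauty_v003."
print(collection.padding)  # 4
print(collection.tail)     # ".exr"

# Rebuild the same collection for an earlier version, as the plugin does.
head = collection.head.replace("v003", "v001")
previous = clique.Collection(head=head,
                             padding=collection.padding,
                             tail=collection.tail)
previous.add("render/beauty_v001.1001.exr")
print(previous.format())   # "render/beauty_v001.%04d.exr [1001]"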
49 pype/plugins/global/publish/extract_json.py Normal file
|
|
@ -0,0 +1,49 @@
|
|||
import os
|
||||
import json
|
||||
import datetime
|
||||
import time
|
||||
|
||||
import pyblish.api
|
||||
from config.vendor import clique
|
||||
|
||||
|
||||
class ExtractJSON(pyblish.api.ContextPlugin):
|
||||
""" Extract all instances to a serialized json file. """
|
||||
|
||||
order = pyblish.api.IntegratorOrder
|
||||
label = "JSON"
|
||||
|
||||
def process(self, context):
|
||||
|
||||
workspace = os.path.join(
|
||||
os.path.dirname(context.data["currentFile"]), "workspace",
|
||||
"instances")
|
||||
|
||||
if not os.path.exists(workspace):
|
||||
os.makedirs(workspace)
|
||||
|
||||
output_data = []
|
||||
for instance in context:
|
||||
|
||||
data = {}
|
||||
for key, value in instance.data.items():
|
||||
if isinstance(value, clique.Collection):
|
||||
value = value.format()
|
||||
|
||||
try:
|
||||
json.dumps(value)
|
||||
data[key] = value
|
||||
except (TypeError, ValueError):
|
||||
msg = "\"{0}\"".format(value)
|
||||
msg += " in instance.data[\"{0}\"]".format(key)
|
||||
msg += " could not be serialized."
|
||||
self.log.debug(msg)
|
||||
|
||||
output_data.append(data)
|
||||
|
||||
timestamp = datetime.datetime.fromtimestamp(
|
||||
time.time()).strftime("%Y%m%d-%H%M%S")
|
||||
filename = timestamp + "_instances.json"
|
||||
|
||||
with open(os.path.join(workspace, filename), "w") as outfile:
|
||||
outfile.write(json.dumps(output_data, indent=4, sort_keys=True))
|
||||
|
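The serialisation guard in ExtractJSON is easier to see in isolation; a minimal self-contained sketch (json.dumps signals unserialisable values with TypeError, which is what the guard needs to catch):

# Minimal illustration of filtering instance data down to JSON-safe values.
import json

data = {"name": "beautyMain", "version": 3, "handle": object()}

serialisable = {}
for key, value in data.items():
    try:
        json.dumps(value)          # raises TypeError for unsupported types
        serialisable[key] = value
    except TypeError:
        print("skipping unserialisable key: {}".format(key))

print(json.dumps(serialisable, indent=4, sort_keys=True))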
|
@ -1,6 +1,5 @@
|
|||
import os
|
||||
import json
|
||||
import pprint
|
||||
import re
|
||||
|
||||
from avalon import api, io
|
||||
|
|
@ -12,7 +11,7 @@ import pyblish.api
|
|||
def _get_script():
|
||||
"""Get path to the image sequence script"""
|
||||
try:
|
||||
from pype.scripts import publish_filesequence
|
||||
from pype.fusion.scripts import publish_filesequence
|
||||
except Exception as e:
|
||||
raise RuntimeError("Expected module 'publish_filesequence' "
|
||||
"to be available")
|
||||
|
|
@ -122,8 +121,14 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Submit image sequence jobs to Deadline"
|
||||
order = pyblish.api.IntegratorOrder + 0.1
|
||||
hosts = ["fusion", "maya"]
|
||||
families = ["studio.saver.deadline", "studio.renderlayer"]
|
||||
|
||||
hosts = ["fusion", "maya", "nuke"]
|
||||
|
||||
families = [
|
||||
"studio.saver.deadline",
|
||||
"studio.renderlayer",
|
||||
"studio.imagesequence"
|
||||
]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):
|
|||
|
||||
label = "Validate File Saved"
|
||||
order = pyblish.api.ValidatorOrder - 0.1
|
||||
hosts = ["maya", "houdini"]
|
||||
hosts = ["maya", "houdini", "nuke"]
|
||||
|
||||
def process(self, context):
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import os
|
||||
import sys
|
||||
import avalon.api
|
||||
import avalon.nuke
|
||||
import nuke
|
||||
|
||||
|
||||
class CrateWriteExr(avalon.api.Creator):
|
||||
|
|
@ -16,10 +16,9 @@ class CrateWriteExr(avalon.api.Creator):
|
|||
# self.data.setdefault("subset", "this")
|
||||
|
||||
def process(self):
|
||||
nuke = getattr(sys.modules["__main__"], "nuke", None)
|
||||
# nuke = getattr(sys.modules["__main__"], "nuke", None)
|
||||
data = {}
|
||||
ext = "exr"
|
||||
root, nodes = avalon.nuke.get_current_script(nuke=nuke)
|
||||
|
||||
# todo: improve method of getting current environment
|
||||
# todo: pref avalon.Session over os.environ
|
||||
|
|
@ -34,7 +33,7 @@ class CrateWriteExr(avalon.api.Creator):
|
|||
filename
|
||||
).replace("\\", "/")
|
||||
|
||||
with avalon.nuke.viewer_update_and_undo_stop(nuke):
|
||||
with avalon.nuke.viewer_update_and_undo_stop():
|
||||
w = nuke.createNode(
|
||||
"Write",
|
||||
"name {}".format(self.name))
|
||||
|
|
|
|||
|
|
@ -1,25 +1,21 @@
|
|||
# from avalon import api
|
||||
#
|
||||
#
|
||||
# class FusionSelectContainers(api.InventoryAction):
|
||||
#
|
||||
# label = "Select Containers"
|
||||
# icon = "mouse-pointer"
|
||||
# color = "#d8d8d8"
|
||||
#
|
||||
# def process(self, containers):
|
||||
#
|
||||
# import avalon.fusion
|
||||
#
|
||||
# tools = [i["_tool"] for i in containers]
|
||||
#
|
||||
# comp = avalon.fusion.get_current_comp()
|
||||
# flow = comp.CurrentFrame.FlowView
|
||||
#
|
||||
# with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
|
||||
# # Clear selection
|
||||
# flow.Select()
|
||||
#
|
||||
# # Select tool
|
||||
# for tool in tools:
|
||||
# flow.Select(tool)
|
||||
from avalon import api
|
||||
|
||||
|
||||
class NukeSelectContainers(api.InventoryAction):
|
||||
|
||||
label = "Select Containers"
|
||||
icon = "mouse-pointer"
|
||||
color = "#d8d8d8"
|
||||
|
||||
def process(self, containers):
|
||||
|
||||
import avalon.nuke
|
||||
|
||||
nodes = [i["_tool"] for i in containers]
|
||||
|
||||
with avalon.nuke.viewer_update_and_undo_stop():
|
||||
# clear previous_selection
|
||||
[n['selected'].setValue(False) for n in nodes]
|
||||
# Select tool
|
||||
for node in nodes:
|
||||
node["selected"].setValue(True)
|
||||
|
|
|
|||
|
|
@ -1,76 +1,76 @@
|
|||
# """A module containing generic loader actions that will display in the Loader.
|
||||
#
|
||||
# """
|
||||
#
|
||||
# from avalon import api
|
||||
#
|
||||
#
|
||||
# class FusionSetFrameRangeLoader(api.Loader):
|
||||
# """Specific loader of Alembic for the avalon.animation family"""
|
||||
#
|
||||
# families = ["animation",
|
||||
# "camera",
|
||||
# "imagesequence",
|
||||
# "yeticache",
|
||||
# "pointcache"]
|
||||
# representations = ["*"]
|
||||
#
|
||||
# label = "Set frame range"
|
||||
# order = 11
|
||||
# icon = "clock-o"
|
||||
# color = "white"
|
||||
#
|
||||
# def load(self, context, name, namespace, data):
|
||||
#
|
||||
# from pype.fusion import lib
|
||||
#
|
||||
# version = context['version']
|
||||
# version_data = version.get("data", {})
|
||||
#
|
||||
# start = version_data.get("startFrame", None)
|
||||
# end = version_data.get("endFrame", None)
|
||||
#
|
||||
# if start is None or end is None:
|
||||
# print("Skipping setting frame range because start or "
|
||||
# "end frame data is missing..")
|
||||
# return
|
||||
#
|
||||
# lib.update_frame_range(start, end)
|
||||
#
|
||||
#
|
||||
# class FusionSetFrameRangeWithHandlesLoader(api.Loader):
|
||||
# """Specific loader of Alembic for the avalon.animation family"""
|
||||
#
|
||||
# families = ["animation",
|
||||
# "camera",
|
||||
# "imagesequence",
|
||||
# "yeticache",
|
||||
# "pointcache"]
|
||||
# representations = ["*"]
|
||||
#
|
||||
# label = "Set frame range (with handles)"
|
||||
# order = 12
|
||||
# icon = "clock-o"
|
||||
# color = "white"
|
||||
#
|
||||
# def load(self, context, name, namespace, data):
|
||||
#
|
||||
# from pype.fusion import lib
|
||||
#
|
||||
# version = context['version']
|
||||
# version_data = version.get("data", {})
|
||||
#
|
||||
# start = version_data.get("startFrame", None)
|
||||
# end = version_data.get("endFrame", None)
|
||||
#
|
||||
# if start is None or end is None:
|
||||
# print("Skipping setting frame range because start or "
|
||||
# "end frame data is missing..")
|
||||
# return
|
||||
#
|
||||
# # Include handles
|
||||
# handles = version_data.get("handles", 0)
|
||||
# start -= handles
|
||||
# end += handles
|
||||
#
|
||||
# lib.update_frame_range(start, end)
|
||||
"""A module containing generic loader actions that will display in the Loader.
|
||||
|
||||
"""
|
||||
|
||||
from avalon import api
|
||||
|
||||
|
||||
class NukeSetFrameRangeLoader(api.Loader):
|
||||
"""Specific loader of Alembic for the avalon.animation family"""
|
||||
|
||||
families = ["animation",
|
||||
"camera",
|
||||
"imagesequence",
|
||||
"yeticache",
|
||||
"pointcache"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Set frame range"
|
||||
order = 11
|
||||
icon = "clock-o"
|
||||
color = "white"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
from pype.nuke import lib
|
||||
|
||||
version = context['version']
|
||||
version_data = version.get("data", {})
|
||||
|
||||
start = version_data.get("startFrame", None)
|
||||
end = version_data.get("endFrame", None)
|
||||
|
||||
if start is None or end is None:
|
||||
print("Skipping setting frame range because start or "
|
||||
"end frame data is missing..")
|
||||
return
|
||||
|
||||
lib.update_frame_range(start, end)
|
||||
|
||||
|
||||
class NukeSetFrameRangeWithHandlesLoader(api.Loader):
|
||||
"""Specific loader of Alembic for the avalon.animation family"""
|
||||
|
||||
families = ["animation",
|
||||
"camera",
|
||||
"imagesequence",
|
||||
"yeticache",
|
||||
"pointcache"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Set frame range (with handles)"
|
||||
order = 12
|
||||
icon = "clock-o"
|
||||
color = "white"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
from pype.nuke import lib
|
||||
|
||||
version = context['version']
|
||||
version_data = version.get("data", {})
|
||||
|
||||
start = version_data.get("startFrame", None)
|
||||
end = version_data.get("endFrame", None)
|
||||
|
||||
if start is None or end is None:
|
||||
print("Skipping setting frame range because start or "
|
||||
"end frame data is missing..")
|
||||
return
|
||||
|
||||
# Include handles
|
||||
handles = version_data.get("handles", 0)
|
||||
start -= handles
|
||||
end += handles
|
||||
|
||||
lib.update_frame_range(start, end)
|
||||
|
|
|
|||
|
|
@ -1,259 +1,252 @@
|
|||
# import os
|
||||
# import contextlib
|
||||
#
|
||||
# from avalon import api
|
||||
# import avalon.io as io
|
||||
#
|
||||
#
|
||||
# @contextlib.contextmanager
|
||||
# def preserve_inputs(tool, inputs):
|
||||
# """Preserve the tool's inputs after context"""
|
||||
#
|
||||
# comp = tool.Comp()
|
||||
#
|
||||
# values = {}
|
||||
# for name in inputs:
|
||||
# tool_input = getattr(tool, name)
|
||||
# value = tool_input[comp.TIME_UNDEFINED]
|
||||
# values[name] = value
|
||||
#
|
||||
# try:
|
||||
# yield
|
||||
# finally:
|
||||
# for name, value in values.items():
|
||||
# tool_input = getattr(tool, name)
|
||||
# tool_input[comp.TIME_UNDEFINED] = value
|
||||
#
|
||||
#
|
||||
# @contextlib.contextmanager
|
||||
# def preserve_trim(loader, log=None):
|
||||
# """Preserve the relative trim of the Loader tool.
|
||||
#
|
||||
# This tries to preserve the loader's trim (trim in and trim out) after
|
||||
# the context by reapplying the "amount" it trims on the clip's length at
|
||||
# start and end.
|
||||
#
|
||||
# """
|
||||
#
|
||||
# # Get original trim as amount of "trimming" from length
|
||||
# time = loader.Comp().TIME_UNDEFINED
|
||||
# length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
|
||||
# trim_from_start = loader["ClipTimeStart"][time]
|
||||
# trim_from_end = length - loader["ClipTimeEnd"][time]
|
||||
#
|
||||
# try:
|
||||
# yield
|
||||
# finally:
|
||||
#
|
||||
# length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
|
||||
# if trim_from_start > length:
|
||||
# trim_from_start = length
|
||||
# if log:
|
||||
# log.warning("Reducing trim in to %d "
|
||||
# "(because of less frames)" % trim_from_start)
|
||||
#
|
||||
# remainder = length - trim_from_start
|
||||
# if trim_from_end > remainder:
|
||||
# trim_from_end = remainder
|
||||
# if log:
|
||||
# log.warning("Reducing trim in to %d "
|
||||
# "(because of less frames)" % trim_from_end)
|
||||
#
|
||||
# loader["ClipTimeStart"][time] = trim_from_start
|
||||
# loader["ClipTimeEnd"][time] = length - trim_from_end
|
||||
#
|
||||
#
|
||||
# def loader_shift(loader, frame, relative=True):
|
||||
# """Shift global in time by i preserving duration
|
||||
#
|
||||
# This moves the loader by i frames preserving global duration. When relative
|
||||
# is False it will shift the global in to the start frame.
|
||||
#
|
||||
# Args:
|
||||
# loader (tool): The fusion loader tool.
|
||||
# frame (int): The amount of frames to move.
|
||||
# relative (bool): When True the shift is relative, else the shift will
|
||||
# change the global in to frame.
|
||||
#
|
||||
# Returns:
|
||||
# int: The resulting relative frame change (how much it moved)
|
||||
#
|
||||
# """
|
||||
# comp = loader.Comp()
|
||||
# time = comp.TIME_UNDEFINED
|
||||
#
|
||||
# old_in = loader["GlobalIn"][time]
|
||||
# old_out = loader["GlobalOut"][time]
|
||||
#
|
||||
# if relative:
|
||||
# shift = frame
|
||||
# else:
|
||||
# shift = frame - old_in
|
||||
#
|
||||
# # Shifting global in will try to automatically compensate for the change
|
||||
# # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
|
||||
# # input values to "just shift" the clip
|
||||
# with preserve_inputs(loader, inputs=["ClipTimeStart",
|
||||
# "ClipTimeEnd",
|
||||
# "HoldFirstFrame",
|
||||
# "HoldLastFrame"]):
|
||||
#
|
||||
# # GlobalIn cannot be set past GlobalOut or vice versa
|
||||
# # so we must apply them in the order of the shift.
|
||||
# if shift > 0:
|
||||
# loader["GlobalOut"][time] = old_out + shift
|
||||
# loader["GlobalIn"][time] = old_in + shift
|
||||
# else:
|
||||
# loader["GlobalIn"][time] = old_in + shift
|
||||
# loader["GlobalOut"][time] = old_out + shift
|
||||
#
|
||||
# return int(shift)
|
||||
#
|
||||
#
|
||||
# class FusionLoadSequence(api.Loader):
|
||||
# """Load image sequence into Fusion"""
|
||||
#
|
||||
# families = ["imagesequence"]
|
||||
# representations = ["*"]
|
||||
#
|
||||
# label = "Load sequence"
|
||||
# order = -10
|
||||
# icon = "code-fork"
|
||||
# color = "orange"
|
||||
#
|
||||
# def load(self, context, name, namespace, data):
|
||||
#
|
||||
# from avalon.fusion import (
|
||||
# imprint_container,
|
||||
# get_current_comp,
|
||||
# comp_lock_and_undo_chunk
|
||||
# )
|
||||
#
|
||||
# # Fallback to asset name when namespace is None
|
||||
# if namespace is None:
|
||||
# namespace = context['asset']['name']
|
||||
#
|
||||
# # Use the first file for now
|
||||
# path = self._get_first_image(self.fname)
|
||||
#
|
||||
# # Create the Loader with the filename path set
|
||||
# comp = get_current_comp()
|
||||
# with comp_lock_and_undo_chunk(comp, "Create Loader"):
|
||||
#
|
||||
# args = (-32768, -32768)
|
||||
# tool = comp.AddTool("Loader", *args)
|
||||
# tool["Clip"] = path
|
||||
#
|
||||
# # Set global in point to start frame (if in version.data)
|
||||
# start = context["version"]["data"].get("startFrame", None)
|
||||
# if start is not None:
|
||||
# loader_shift(tool, start, relative=False)
|
||||
#
|
||||
# imprint_container(tool,
|
||||
# name=name,
|
||||
# namespace=namespace,
|
||||
# context=context,
|
||||
# loader=self.__class__.__name__)
|
||||
#
|
||||
# def switch(self, container, representation):
|
||||
# self.update(container, representation)
|
||||
#
|
||||
# def update(self, container, representation):
|
||||
# """Update the Loader's path
|
||||
#
|
||||
# Fusion automatically tries to reset some variables when changing
|
||||
# the loader's path to a new file. These automatic changes are to its
|
||||
# inputs:
|
||||
# - ClipTimeStart: Fusion reset to 0 if duration changes
|
||||
# - We keep the trim in as close as possible to the previous value.
|
||||
# When there are less frames then the amount of trim we reduce
|
||||
# it accordingly.
|
||||
#
|
||||
# - ClipTimeEnd: Fusion reset to 0 if duration changes
|
||||
# - We keep the trim out as close as possible to the previous value
|
||||
# within new amount of frames after trim in (ClipTimeStart) has
|
||||
# been set.
|
||||
#
|
||||
# - GlobalIn: Fusion reset to comp's global in if duration changes
|
||||
# - We change it to the "startFrame"
|
||||
#
|
||||
# - GlobalEnd: Fusion resets to globalIn + length if duration changes
|
||||
# - We do the same like Fusion - allow fusion to take control.
|
||||
#
|
||||
# - HoldFirstFrame: Fusion resets this to 0
|
||||
# - We preverse the value.
|
||||
#
|
||||
# - HoldLastFrame: Fusion resets this to 0
|
||||
# - We preverse the value.
|
||||
#
|
||||
# - Reverse: Fusion resets to disabled if "Loop" is not enabled.
|
||||
# - We preserve the value.
|
||||
#
|
||||
# - Depth: Fusion resets to "Format"
|
||||
# - We preverse the value.
|
||||
#
|
||||
# - KeyCode: Fusion resets to ""
|
||||
# - We preverse the value.
|
||||
#
|
||||
# - TimeCodeOffset: Fusion resets to 0
|
||||
# - We preverse the value.
|
||||
#
|
||||
# """
|
||||
#
|
||||
# from avalon.fusion import comp_lock_and_undo_chunk
|
||||
#
|
||||
# tool = container["_tool"]
|
||||
# assert tool.ID == "Loader", "Must be Loader"
|
||||
# comp = tool.Comp()
|
||||
#
|
||||
# root = api.get_representation_path(representation)
|
||||
# path = self._get_first_image(root)
|
||||
#
|
||||
# # Get start frame from version data
|
||||
# version = io.find_one({"type": "version",
|
||||
# "_id": representation["parent"]})
|
||||
# start = version["data"].get("startFrame")
|
||||
# if start is None:
|
||||
# self.log.warning("Missing start frame for updated version"
|
||||
# "assuming starts at frame 0 for: "
|
||||
# "{} ({})".format(tool.Name, representation))
|
||||
# start = 0
|
||||
#
|
||||
# with comp_lock_and_undo_chunk(comp, "Update Loader"):
|
||||
#
|
||||
# # Update the loader's path whilst preserving some values
|
||||
# with preserve_trim(tool, log=self.log):
|
||||
# with preserve_inputs(tool,
|
||||
# inputs=("HoldFirstFrame",
|
||||
# "HoldLastFrame",
|
||||
# "Reverse",
|
||||
# "Depth",
|
||||
# "KeyCode",
|
||||
# "TimeCodeOffset")):
|
||||
# tool["Clip"] = path
|
||||
#
|
||||
# # Set the global in to the start frame of the sequence
|
||||
# global_in_changed = loader_shift(tool, start, relative=False)
|
||||
# if global_in_changed:
|
||||
# # Log this change to the user
|
||||
# self.log.debug("Changed '%s' global in: %d" % (tool.Name,
|
||||
# start))
|
||||
#
|
||||
# # Update the imprinted representation
|
||||
# tool.SetData("avalon.representation", str(representation["_id"]))
|
||||
#
|
||||
# def remove(self, container):
|
||||
#
|
||||
# from avalon.fusion import comp_lock_and_undo_chunk
|
||||
#
|
||||
# tool = container["_tool"]
|
||||
# assert tool.ID == "Loader", "Must be Loader"
|
||||
# comp = tool.Comp()
|
||||
#
|
||||
# with comp_lock_and_undo_chunk(comp, "Remove Loader"):
|
||||
# tool.Delete()
|
||||
#
|
||||
# def _get_first_image(self, root):
|
||||
# """Get first file in representation root"""
|
||||
# files = sorted(os.listdir(root))
|
||||
# return os.path.join(root, files[0])
|
||||
import os
|
||||
import contextlib
|
||||
|
||||
from avalon import api
|
||||
import avalon.io as io
|
||||
|
||||
from avalon.nuke import log
|
||||
import nuke
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def preserve_inputs(node, knobs):
|
||||
"""Preserve the node's inputs after context"""
|
||||
|
||||
values = {}
|
||||
for name in knobs:
|
||||
try:
|
||||
knob_value = node[name].value()
|
||||
values[name] = knob_value
|
||||
except ValueError:
|
||||
log.warning("missing knob {} in node "
|
||||
"{}".format(name, node['name'].value()))
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
for name, value in values.items():
|
||||
node[name].setValue(value)
|
||||
|
||||
|
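A sketch of how preserve_inputs is meant to be used, assuming a live Nuke session and an existing Read node; "Read1" is a placeholder name:

# Hypothetical usage of the preserve_inputs context manager defined above.
import nuke

node = nuke.toNode("Read1")

with preserve_inputs(node, knobs=["first", "last", "frame_mode", "frame"]):
    # Anything changed here on the listed knobs is restored on exit.
    node["first"].setValue(1)
    node["last"].setValue(10)

print(node["first"].value())  # back to its original value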
||||
@contextlib.contextmanager
|
||||
def preserve_trim(node):
|
||||
"""Preserve the relative trim of the Loader tool.
|
||||
|
||||
This tries to preserve the loader's trim (trim in and trim out) after
|
||||
the context by reapplying the "amount" it trims on the clip's length at
|
||||
start and end.
|
||||
|
||||
"""
|
||||
# working script frame range
|
||||
script_start = nuke.root()["start_frame"].value()
|
||||
|
||||
start_at_frame = None
|
||||
offset_frame = None
|
||||
if node['frame_mode'].value() == "start at":
|
||||
start_at_frame = node['frame'].value()
|
||||
if node['frame_mode'].value() == "offset":
|
||||
offset_frame = node['frame'].value()
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if start_at_frame:
|
||||
node['frame_mode'].setValue("start at")
|
||||
node['frame'].setValue(str(script_start))
|
||||
log.info("start frame of reader was set to "
|
||||
"{}".format(script_start))
|
||||
|
||||
if offset_frame:
|
||||
node['frame_mode'].setValue("offset")
|
||||
node['frame'].setValue(str((script_start + offset_frame)))
|
||||
log.info("start frame of reader was set to "
|
||||
"{}".format(script_start))
|
||||
|
||||
|
||||
def loader_shift(node, frame, relative=True):
|
||||
"""Shift global in time by i preserving duration
|
||||
|
||||
This moves the loader by i frames preserving global duration. When relative
|
||||
is False it will shift the global in to the start frame.
|
||||
|
||||
Args:
|
||||
loader (tool): The fusion loader tool.
|
||||
frame (int): The amount of frames to move.
|
||||
relative (bool): When True the shift is relative, else the shift will
|
||||
change the global in to frame.
|
||||
|
||||
Returns:
|
||||
int: The resulting relative frame change (how much it moved)
|
||||
|
||||
"""
|
||||
# working script frame range
|
||||
script_start = nuke.root()["start_frame"].value()
|
||||
|
||||
if node['frame_mode'].value() == "start at":
|
||||
start_at_frame = node['frame'].value()
|
||||
if node['frame_mode'].value() == "offset":
|
||||
offset_frame = node['frame'].value()
|
||||
|
||||
if relative:
|
||||
shift = frame
|
||||
else:
|
||||
if start_at_frame:
|
||||
shift = frame
|
||||
if offset_frame:
|
||||
shift = frame + offset_frame
|
||||
|
||||
# Shifting global in will try to automatically compensate for the change
|
||||
# in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
|
||||
# input values to "just shift" the clip
|
||||
with preserve_inputs(node, knobs=["file",
|
||||
"first",
|
||||
"last",
|
||||
"originfirst",
|
||||
"originlast",
|
||||
"frame_mode",
|
||||
"frame"]):
|
||||
|
||||
# GlobalIn cannot be set past GlobalOut or vice versa
|
||||
# so we must apply them in the order of the shift.
|
||||
if start_at_frame:
|
||||
node['frame_mode'].setValue("start at")
|
||||
node['frame'].setValue(str(script_start + shift))
|
||||
if offset_frame:
|
||||
node['frame_mode'].setValue("offset")
|
||||
node['frame'].setValue(str(shift))
|
||||
|
||||
return int(shift)
|
||||
|
||||
|
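And a usage sketch for loader_shift, again assuming a live Nuke session; "Read1" is a placeholder, and the sketch assumes the Read node's frame_mode knob is already set to "start at", as the function expects:

# Hypothetical call showing how the loaders below use loader_shift.
import nuke

read_node = nuke.toNode("Read1")

# Move the Read node so its sequence starts at frame 1001,
# regardless of where it currently starts.
shift = loader_shift(read_node, 1001, relative=False)
print("node shifted by {} frames".format(shift))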
||||
class NukeLoadSequence(api.Loader):
|
||||
"""Load image sequence into Nuke"""
|
||||
|
||||
families = ["imagesequence"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Load sequence"
|
||||
order = -10
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
from avalon.nuke import (
|
||||
containerise,
|
||||
ls_img_sequence,
|
||||
viewer_update_and_undo_stop
|
||||
)
|
||||
|
||||
# Fallback to asset name when namespace is None
|
||||
if namespace is None:
|
||||
namespace = context['asset']['name']
|
||||
|
||||
# Use the first file for now
|
||||
# TODO: fix path fname
|
||||
file = ls_img_sequence(os.path.dirname(self.fname), one=True)
|
||||
|
||||
# Create the Loader with the filename path set
|
||||
with viewer_update_and_undo_stop():
|
||||
# TODO: it might be universal read to img/geo/camera
|
||||
r = nuke.createNode(
|
||||
"Read",
|
||||
"name {}".format(self.name)) # TODO: does self.name exist?
|
||||
r["file"].setValue(file['path'])
|
||||
if len(file['frames']) == 1:
|
||||
first = file['frames'][0][0]
|
||||
last = file['frames'][0][1]
|
||||
r["originfirst"].setValue(first)
|
||||
r["first"].setValue(first)
|
||||
r["originlast"].setValue(last)
|
||||
r["last"].setValue(last)
|
||||
else:
|
||||
first = file['frames'][0][0]
|
||||
last = file['frames'][-1][1]
|
||||
r["originfirst"].setValue(first)
|
||||
r["first"].setValue(first)
|
||||
r["originlast"].setValue(last)
|
||||
r["last"].setValue(last)
|
||||
log.warning("Missing frames in image sequence")
|
||||
|
||||
# Set global in point to start frame (if in version.data)
|
||||
start = context["version"]["data"].get("startFrame", None)
|
||||
if start is not None:
|
||||
loader_shift(r, start, relative=False)
|
||||
|
||||
containerise(r,
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
context=context,
|
||||
loader=self.__class__.__name__)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""Update the Loader's path
|
||||
|
||||
Fusion automatically tries to reset some variables when changing
|
||||
the loader's path to a new file. These automatic changes are to its
|
||||
inputs:
|
||||
|
||||
"""
|
||||
|
||||
from avalon.nuke import (
|
||||
viewer_update_and_undo_stop,
|
||||
ls_img_sequence,
|
||||
update_container
|
||||
)
|
||||
|
||||
node = container["_tool"]
|
||||
# TODO: prepare also for other readers img/geo/camera
|
||||
assert node.Class() == "Read", "Must be Read"
|
||||
|
||||
root = api.get_representation_path(representation)
|
||||
file = ls_img_sequence(os.path.dirname(root), one=True)
|
||||
|
||||
# Get start frame from version data
|
||||
version = io.find_one({"type": "version",
|
||||
"_id": representation["parent"]})
|
||||
start = version["data"].get("startFrame")
|
||||
if start is None:
|
||||
log.warning("Missing start frame for updated version, "
|
||||
"assuming starts at frame 0 for: "
|
||||
"{} ({})".format(node['name'].value(), representation))
|
||||
start = 0
|
||||
|
||||
with viewer_update_and_undo_stop():
|
||||
|
||||
# Update the loader's path whilst preserving some values
|
||||
with preserve_trim(node):
|
||||
with preserve_inputs(node,
|
||||
knobs=["file",
|
||||
"first",
|
||||
"last",
|
||||
"originfirst",
|
||||
"originlast",
|
||||
"frame_mode",
|
||||
"frame"]):
|
||||
node["file"] = file["path"]
|
||||
|
||||
# Set the global in to the start frame of the sequence
|
||||
global_in_changed = loader_shift(node, start, relative=False)
|
||||
if global_in_changed:
|
||||
# Log this change to the user
|
||||
log.debug("Changed '{}' global in:"
|
||||
" {:d}".format(node['name'].value(), start))
|
||||
|
||||
# Update the imprinted representation
|
||||
update_container(
|
||||
node,
|
||||
{"representation": str(representation["_id"])}
|
||||
)
|
||||
|
||||
def remove(self, container):
|
||||
|
||||
from avalon.nuke import viewer_update_and_undo_stop
|
||||
|
||||
node = container["_tool"]
|
||||
assert node.Class() == "Read", "Must be Read"
|
||||
|
||||
with viewer_update_and_undo_stop():
|
||||
nuke.delete(node)
|
||||
|
|
|
|||
18 pype/plugins/nuke/publish/collect_current_file.py Normal file
|
|
@ -0,0 +1,18 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectCurrentFile(pyblish.api.ContextPlugin):
|
||||
"""Inject the current working file into context"""
|
||||
|
||||
order = pyblish.api.CollectorOrder - 0.1
|
||||
label = "Collect Current File"
|
||||
hosts = ["nuke"]
|
||||
families = ["studio.workfile"]
|
||||
|
||||
def process(self, context):
|
||||
import os
|
||||
import nuke
|
||||
current_file = nuke.root().name()
|
||||
normalised = os.path.normpath(current_file)
|
||||
|
||||
context.data["currentFile"] = normalised
|
||||
17 pype/plugins/nuke/publish/collect_framerate.py Normal file
|
|
@ -0,0 +1,17 @@
|
|||
import nuke
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFramerate(pyblish.api.ContextPlugin):
|
||||
"""Collect framerate."""
|
||||
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect Framerate"
|
||||
hosts = [
|
||||
"nuke",
|
||||
"nukeassist"
|
||||
]
|
||||
|
||||
def process(self, context):
|
||||
context.data["framerate"] = nuke.root()["fps"].getValue()
|
||||
14 pype/plugins/nuke/publish/collect_host.py Normal file
|
|
@ -0,0 +1,14 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectHost(pyblish.api.ContextPlugin):
|
||||
"""Inject the host into context"""
|
||||
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect Host"
|
||||
hosts = ["nuke"]
|
||||
|
||||
def process(self, context):
|
||||
import pyblish.api
|
||||
|
||||
context.data["host"] = pyblish.api.current_host()
|
||||
13 pype/plugins/nuke/publish/collect_host_version.py Normal file
|
|
@ -0,0 +1,13 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectHostVersion(pyblish.api.ContextPlugin):
|
||||
"""Inject the hosts version into context"""
|
||||
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect Host Version"
|
||||
hosts = ["nuke"]
|
||||
|
||||
def process(self, context):
|
||||
import nuke
|
||||
context.data["hostVersion"] = nuke.NUKE_VERSION_STRING
|
||||
14 pype/plugins/nuke/publish/collect_selection.py Normal file
|
|
@ -0,0 +1,14 @@
|
|||
import nuke
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectSelection(pyblish.api.ContextPlugin):
|
||||
"""Collect selection."""
|
||||
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect Selection of Nodes"
|
||||
hosts = ["nuke"]
|
||||
|
||||
def process(self, context):
|
||||
context.data["selection"] = nuke.selectedNodes()
|
||||
29 pype/plugins/nuke/publish/extract_output_directory.py Normal file
|
|
@ -0,0 +1,29 @@
|
|||
import os
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ExtractOutputDirectory(pyblish.api.InstancePlugin):
|
||||
"""Extracts the output path for any collection or single output_path."""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.1
|
||||
label = "Output Directory"
|
||||
optional = True
|
||||
|
||||
# targets = ["process"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
path = None
|
||||
|
||||
if "collection" in instance.data.keys():
|
||||
path = instance.data["collection"].format()
|
||||
|
||||
if "output_path" in instance.data.keys():
|
||||
path = instance.data["output_path"]
|
||||
|
||||
if not path:
|
||||
return
|
||||
|
||||
if not os.path.exists(os.path.dirname(path)):
|
||||
os.makedirs(os.path.dirname(path))
|
||||
15 pype/plugins/nuke/publish/extract_script_save.py Normal file
|
|
@ -0,0 +1,15 @@
|
|||
import nuke
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ExtractScriptSave(pyblish.api.InstancePlugin):
|
||||
""" Saves the script before extraction. """
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.49
|
||||
label = "Script Save"
|
||||
hosts = ["nuke"]
|
||||
families = ["studio.saver"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
nuke.scriptSave()
|
||||
68 pype/plugins/nuke/publish/validate_nuke_settings.py Normal file
|
|
@ -0,0 +1,68 @@
|
|||
import nuke
|
||||
import os
|
||||
import pyblish.api
|
||||
import avalon.io as io
|
||||
# TODO: add repair function
|
||||
|
||||
|
||||
@pyblish.api.log
|
||||
class ValidateSettingsNuke(pyblish.api.Validator):
|
||||
""" Validates settings """
|
||||
|
||||
families = ['scene']
|
||||
hosts = ['nuke']
|
||||
optional = True
|
||||
label = 'Settings'
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
asset = io.find_one({"name": os.environ['AVALON_ASSET']})
|
||||
try:
|
||||
avalon_resolution = asset["data"].get("resolution", '')
|
||||
avalon_pixel_aspect = asset["data"].get("pixel_aspect", '')
|
||||
avalon_fps = asset["data"].get("fps", '')
|
||||
avalon_first = asset["data"].get("edit_in", '')
|
||||
avalon_last = asset["data"].get("edit_out", '')
|
||||
avalon_crop = asset["data"].get("crop", '')
|
||||
except KeyError:
|
||||
print(
|
||||
"No resolution information found for \"{0}\".".format(
|
||||
asset["name"]
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
# validating first frame
|
||||
local_first = nuke.root()['first_frame'].value()
|
||||
msg = 'First frame is incorrect.'
|
||||
msg += '\n\nLocal first: %s' % local_first
|
||||
msg += '\n\nOnline first: %s' % avalon_first
|
||||
assert local_first == avalon_first, msg
|
||||
|
||||
# validating last frame
|
||||
local_last = nuke.root()['last_frame'].value()
|
||||
msg = 'Last frame is incorrect.'
|
||||
msg += '\n\nLocal last: %s' % local_last
|
||||
msg += '\n\nOnline last: %s' % avalon_last
|
||||
assert local_last == avalon_last, msg
|
||||
|
||||
# validating fps
|
||||
local_fps = nuke.root()['fps'].value()
|
||||
msg = 'FPS is incorrect.'
|
||||
msg += '\n\nLocal fps: %s' % local_fps
|
||||
msg += '\n\nOnline fps: %s' % avalon_fps
|
||||
assert local_fps == avalon_fps, msg
|
||||
|
||||
# validating resolution width
|
||||
local_width = nuke.root().format().width()
|
||||
msg = 'Width is incorrect.'
|
||||
msg += '\n\nLocal width: %s' % local_width
|
||||
msg += '\n\nOnline width: %s' % avalon_resolution[0]
|
||||
assert local_width == avalon_resolution[0], msg
|
||||
|
||||
# validating resolution width
|
||||
local_height = nuke.root().format().height()
|
||||
msg = 'Height is incorrect.'
|
||||
msg += '\n\nLocal height: %s' % local_height
|
||||
msg += '\n\nOnline height: %s' % avalon_resolution[1]
|
||||
assert local_height == avalon_resolution[1], msg
|
||||
20 pype/plugins/nuke/publish/validate_prerenders_output.py Normal file
|
|
@ -0,0 +1,20 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
|
||||
|
||||
@pyblish.api.log
|
||||
class ValidatePrerendersOutput(pyblish.api.Validator):
|
||||
"""Validates that the output directory for the write nodes exists"""
|
||||
|
||||
families = ['write.prerender']
|
||||
hosts = ['nuke']
|
||||
label = 'Pre-renders output'
|
||||
|
||||
def process(self, instance):
|
||||
path = os.path.dirname(instance[0]['file'].value())
|
||||
|
||||
if 'output' not in path:
|
||||
name = instance[0].name()
|
||||
msg = 'Output directory for %s is not in an "output" folder.' % name
|
||||
|
||||
raise ValueError(msg)
|
||||
33 pype/plugins/nuke/publish/validate_proxy_mode.py Normal file
|
|
@ -0,0 +1,33 @@
|
|||
import nuke
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class RepairNukeProxyModeAction(pyblish.api.Action):
|
||||
|
||||
label = "Repair"
|
||||
icon = "wrench"
|
||||
on = "failed"
|
||||
|
||||
def process(self, context, plugin):
|
||||
|
||||
nuke.root()["proxy"].setValue(0)
|
||||
|
||||
|
||||
class ValidateNukeProxyMode(pyblish.api.ContextPlugin):
|
||||
"""Validates against having proxy mode on."""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
optional = True
|
||||
label = "Proxy Mode"
|
||||
actions = [RepairNukeProxyModeAction]
|
||||
hosts = ["nuke", "nukeassist"]
|
||||
# targets = ["default", "process"]
|
||||
|
||||
def process(self, context):
|
||||
|
||||
msg = (
|
||||
"Proxy mode is not supported. Please disable Proxy Mode in the "
|
||||
"Project settings."
|
||||
)
|
||||
assert not nuke.root()["proxy"].getValue(), msg
|
||||
56 pype/plugins/nuke/publish/validate_write_nodes.py Normal file
|
|
@ -0,0 +1,56 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
import pype.utils
|
||||
|
||||
|
||||
@pyblish.api.log
|
||||
class RepairNukeWriteNodeAction(pyblish.api.Action):
|
||||
label = "Repair"
|
||||
on = "failed"
|
||||
icon = "wrench"
|
||||
|
||||
def process(self, context, plugin):
|
||||
|
||||
instances = pype.utils.filter_instances(context, plugin)
|
||||
for instance in instances:
|
||||
|
||||
if "create_directories" in instance[0].knobs():
|
||||
instance[0]['create_directories'].setValue(True)
|
||||
else:
|
||||
path, file = os.path.split(instance.data['outputFilename'])
|
||||
self.log.info(path)
|
||||
|
||||
if not os.path.exists(path):
|
||||
os.makedirs(path)
|
||||
|
||||
if "metadata" in instance[0].knobs().keys():
|
||||
instance[0]["metadata"].setValue("all metadata")
|
||||
|
||||
|
||||
class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
|
||||
""" Validates file output. """
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
optional = True
|
||||
families = ["write.render"]
|
||||
label = "Write Node"
|
||||
actions = [RepairNukeWriteNodeAction]
|
||||
hosts = ["nuke"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
# Validate output directory exists, if not creating directories.
|
||||
# The existence of the knob is queried because previous version
|
||||
# of Nuke did not have this feature.
|
||||
if "create_directories" in instance[0].knobs():
|
||||
msg = "Use Create Directories"
|
||||
assert instance[0].knobs()['create_directories'].value(), msg
|
||||
else:
|
||||
path, file = os.path.split(instance.data['outputFilename'])
|
||||
msg = "Output directory doesn't exist: \"{0}\"".format(path)
|
||||
assert os.path.exists(path), msg
|
||||
|
||||
# Validate metadata knob
|
||||
if "metadata" in instance[0].knobs().keys():
|
||||
msg = "Metadata needs to be set to \"all metadata\"."
|
||||
assert instance[0]["metadata"].value() == "all metadata", msg
|
||||
98 pype/utils/__init__.py Normal file
|
|
@ -0,0 +1,98 @@
|
|||
from .lib import *
|
||||
|
||||
|
||||
def load_capture_preset(path):
|
||||
import capture_gui
|
||||
import capture
|
||||
|
||||
path = path
|
||||
preset = capture_gui.lib.load_json(path)
|
||||
print(preset)
|
||||
|
||||
options = dict()
|
||||
|
||||
# CODEC
|
||||
id = 'Codec'
|
||||
for key in preset[id]:
|
||||
options[str(key)] = preset[id][key]
|
||||
|
||||
# GENERIC
|
||||
id = 'Generic'
|
||||
for key in preset[id]:
|
||||
if key.startswith('isolate'):
|
||||
pass
|
||||
# options['isolate'] = preset[id][key]
|
||||
else:
|
||||
options[str(key)] = preset[id][key]
|
||||
|
||||
# RESOLUTION
|
||||
id = 'Resolution'
|
||||
options['height'] = preset[id]['height']
|
||||
options['width'] = preset[id]['width']
|
||||
|
||||
# DISPLAY OPTIONS
|
||||
id = 'Display Options'
|
||||
disp_options = {}
|
||||
for key in preset['Display Options']:
|
||||
if key.startswith('background'):
|
||||
disp_options[key] = preset['Display Options'][key]
|
||||
else:
|
||||
disp_options['displayGradient'] = True
|
||||
|
||||
options['display_options'] = disp_options
|
||||
|
||||
# VIEWPORT OPTIONS
|
||||
temp_options = {}
|
||||
id = 'Renderer'
|
||||
for key in preset[id]:
|
||||
temp_options[str(key)] = preset[id][key]
|
||||
|
||||
temp_options2 = {}
|
||||
id = 'Viewport Options'
|
||||
light_options = {0: "default",
|
||||
1: 'all',
|
||||
2: 'selected',
|
||||
3: 'flat',
|
||||
4: 'nolights'}
|
||||
for key in preset[id]:
|
||||
if key == 'high_quality':
|
||||
temp_options2['multiSampleEnable'] = True
|
||||
temp_options2['multiSampleCount'] = 4
|
||||
temp_options2['textureMaxResolution'] = 512
|
||||
temp_options2['enableTextureMaxRes'] = True
|
||||
|
||||
if key == 'alphaCut':
|
||||
temp_options2['transparencyAlgorithm'] = 5
|
||||
temp_options2['transparencyQuality'] = 1
|
||||
|
||||
if key == 'headsUpDisplay':
|
||||
temp_options['headsUpDisplay'] = True
|
||||
|
||||
if key == 'displayLights':
|
||||
temp_options[str(key)] = light_options[preset[id][key]]
|
||||
else:
|
||||
temp_options[str(key)] = preset[id][key]
|
||||
|
||||
for key in ['override_viewport_options', 'high_quality', 'alphaCut']:
|
||||
temp_options.pop(key, None)
|
||||
|
||||
options['viewport_options'] = temp_options
|
||||
options['viewport2_options'] = temp_options2
|
||||
|
||||
# use active sound track
|
||||
scene = capture.parse_active_scene()
|
||||
options['sound'] = scene['sound']
|
||||
cam_options = dict()
|
||||
cam_options['overscan'] = 1.0
|
||||
cam_options['displayFieldChart'] = False
|
||||
cam_options['displayFilmGate'] = False
|
||||
cam_options['displayFilmOrigin'] = False
|
||||
cam_options['displayFilmPivot'] = False
|
||||
cam_options['displayGateMask'] = False
|
||||
cam_options['displayResolution'] = False
|
||||
cam_options['displaySafeAction'] = False
|
||||
cam_options['displaySafeTitle'] = False
|
||||
|
||||
# options['display_options'] = temp_options
|
||||
|
||||
return options
|
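A rough usage sketch for load_capture_preset; capture and capture_gui are the Maya playblast libraries imported inside the function, the preset path is a placeholder, and the exact keys in the returned dict depend on the preset saved by capture_gui:

# Hypothetical use of load_capture_preset inside a Maya session.
from pype.utils import load_capture_preset

options = load_capture_preset("/path/to/presets/review.json")
print(sorted(options))   # e.g. display_options, height, sound, width, ...

# The dict is shaped so it can be fed to the Maya capture library, e.g.:
# import capture
# capture.capture(filename="/tmp/review_playblast", **options)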
||||
107 pype/utils/lib.py Normal file
|
|
@ -0,0 +1,107 @@
|
|||
import re
|
||||
import tempfile
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import pyblish.api
|
||||
|
||||
print('pyblish_utils loaded')
|
||||
|
||||
|
||||
def save_preset(path, preset):
|
||||
"""Save options to path"""
|
||||
with open(path, "w") as f:
|
||||
json.dump(preset, f)
|
||||
|
||||
|
||||
def load_preset(path):
|
||||
"""Load options json from path"""
|
||||
with open(path, "r") as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def temp_dir(context):
|
||||
"""Provide a temporary directory in which to store extracted files"""
|
||||
extract_dir = context.data('extractDir')
|
||||
|
||||
if not extract_dir:
|
||||
extract_dir = tempfile.mkdtemp()
|
||||
context.set_data('extractDir', value=extract_dir)
|
||||
|
||||
return extract_dir
|
||||
|
||||
|
||||
def version_get(string, prefix, suffix=None):
|
||||
"""Extract version information from filenames. Code from Foundry's nukescripts.version_get()"""
|
||||
|
||||
if string is None:
|
||||
raise ValueError("Empty version string - no match")
|
||||
|
||||
regex = "[/_.]" + prefix + "\d+"
|
||||
matches = re.findall(regex, string, re.IGNORECASE)
|
||||
if not len(matches):
|
||||
msg = "No \"_" + prefix + "#\" found in \"" + string + "\""
|
||||
raise ValueError(msg)
|
||||
return (matches[-1:][0][1], re.search("\d+", matches[-1:][0]).group())
|
||||
|
||||
|
||||
def version_set(string, prefix, oldintval, newintval):
|
||||
"""Changes version information from filenames. Code from Foundry's nukescripts.version_set()"""
|
||||
|
||||
regex = "[/_.]" + prefix + "\d+"
|
||||
matches = re.findall(regex, string, re.IGNORECASE)
|
||||
if not len(matches):
|
||||
return ""
|
||||
|
||||
# Filter to retain only version strings with matching numbers
|
||||
matches = filter(lambda s: int(s[2:]) == oldintval, matches)
|
||||
|
||||
# Replace all version strings with matching numbers
|
||||
for match in matches:
|
||||
# use expression instead of expr so 0 prefix does not make octal
|
||||
fmt = "%%(#)0%dd" % (len(match) - 2)
|
||||
newfullvalue = match[0] + prefix + str(fmt % {"#": newintval})
|
||||
string = re.sub(match, newfullvalue, string)
|
||||
return string
|
||||
|
||||
|
||||
def version_up(string):
|
||||
|
||||
try:
|
||||
(prefix, v) = version_get(string, 'v')
|
||||
v = int(v)
|
||||
file = version_set(string, prefix, v, v + 1)
|
||||
except ValueError:
|
||||
raise ValueError('Unable to version up File')
|
||||
|
||||
return file
|
||||
|
||||
|
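The version helpers above mirror Foundry's nukescripts behaviour; a quick worked example (the file name is made up):

# Worked example for version_get / version_set / version_up.
path = "/shots/sh010/comp/sh010_comp_v003.nk"

prefix, number = version_get(path, "v")
print(prefix, number)                      # v 003

print(version_set(path, prefix, int(number), 4))
# /shots/sh010/comp/sh010_comp_v004.nk

print(version_up(path))
# /shots/sh010/comp/sh010_comp_v004.nk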
||||
def open_folder(path):
|
||||
"""Provide a temporary directory in which to store extracted files"""
|
||||
import subprocess
|
||||
path = os.path.abspath(path)
|
||||
if sys.platform == 'win32':
|
||||
subprocess.Popen('explorer "%s"' % path)
|
||||
elif sys.platform == 'darwin': # macOS
|
||||
subprocess.Popen(['open', path])
|
||||
else: # linux
|
||||
try:
|
||||
subprocess.Popen(['xdg-open', path])
|
||||
except OSError:
|
||||
raise OSError('unsupported xdg-open call??')
|
||||
|
||||
|
||||
def filter_instances(context, plugin):
|
||||
"""Provide a temporary directory in which to store extracted files"""
|
||||
# Get the errored instances
|
||||
allInstances = []
|
||||
for result in context.data["results"]:
|
||||
if (result["instance"] is not None and
|
||||
result["instance"] not in allInstances):
|
||||
allInstances.append(result["instance"])
|
||||
|
||||
# Apply pyblish.logic to get the instances for the plug-in
|
||||
instances = pyblish.api.instances_by_plugin(allInstances, plugin)
|
||||
|
||||
return instances
|
||||
292 pype/vendor/clique/__init__.py vendored Normal file
|
|
@ -0,0 +1,292 @@
|
|||
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from ._version import __version__
|
||||
from .collection import Collection
|
||||
from .error import CollectionError
|
||||
# was changed for ftrack-api
|
||||
from six import string_types
|
||||
|
||||
|
||||
#: Pattern for matching an index with optional padding.
|
||||
DIGITS_PATTERN = '(?P<index>(?P<padding>0*)\d+)'
|
||||
|
||||
#: Common patterns that can be passed to :py:func:`~clique.assemble`.
|
||||
PATTERNS = {
|
||||
'frames': '\.{0}\.\D+\d?$'.format(DIGITS_PATTERN),
|
||||
'versions': 'v{0}'.format(DIGITS_PATTERN)
|
||||
}
|
||||
|
||||
|
||||
def split(fname):
|
||||
'''Split `fname` into ({head}, {index} and {tail})
|
||||
|
||||
Example:
|
||||
>>> split('rs_beauty.1000.png')
|
||||
('rs_beauty.', '1000', '.png')
|
||||
>>> split('myRender.0100.png')
|
||||
('myRender.', '0100', '.png')
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
collections, _ = assemble([fname], minimum_items=1)
|
||||
except IndexError:
|
||||
raise ValueError("No collection found")
|
||||
else:
|
||||
# Search for indexes starting from end, as opposed to start
|
||||
# E.g. myRender2017.001.png -> myRender2017.%03d.png
|
||||
# As opposed to -> myRender%d.%001.png
|
||||
col = collections[-1]
|
||||
idx = list(col.indexes)[0]
|
||||
|
||||
return (col.head,
|
||||
str(idx).zfill(col.padding),
|
||||
col.tail)
|
||||
|
||||
|
||||
def assemble(iterable, patterns=None, minimum_items=2, case_sensitive=True):
|
||||
'''Assemble items in *iterable* into discrete collections.
|
||||
|
||||
*patterns* may be specified as a list of regular expressions to limit
|
||||
the returned collection possibilities. Use this when interested in
|
||||
collections that only match specific patterns. Each pattern must contain
|
||||
the expression from :py:data:`DIGITS_PATTERN` exactly once.
|
||||
|
||||
A selection of common expressions are available in :py:data:`PATTERNS`.
|
||||
|
||||
.. note::
|
||||
|
||||
If a pattern is supplied as a string it will be automatically compiled
|
||||
to a :py:class:`re.RegexObject` instance for convenience.
|
||||
|
||||
When *patterns* is not specified, collections are formed by examining all
|
||||
possible groupings of the items in *iterable* based around common numerical
|
||||
components.
|
||||
|
||||
*minimum_items* dictates the minimum number of items a collection must have
|
||||
in order to be included in the result. The default is 2, filtering out
|
||||
single item collections.
|
||||
|
||||
If *case_sensitive* is False, then items will be treated as part of the same
|
||||
collection when they only differ in casing. To avoid ambiguity, the
|
||||
resulting collection will always be lowercase. For example, "item.0001.dpx"
|
||||
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
|
||||
|
||||
.. note::
|
||||
|
||||
Any compiled *patterns* will also respect the set case sensitivity.
|
||||
|
||||
Return tuple of two lists (collections, remainder) where 'collections' is a
|
||||
list of assembled :py:class:`~clique.collection.Collection` instances and
|
||||
'remainder' is a list of items that did not belong to any collection.
|
||||
|
||||
'''
|
||||
collection_map = defaultdict(set)
|
||||
collections = []
|
||||
remainder = []
|
||||
|
||||
# Compile patterns.
|
||||
flags = 0
|
||||
if not case_sensitive:
|
||||
flags |= re.IGNORECASE
|
||||
|
||||
compiled_patterns = []
|
||||
|
||||
if patterns is not None:
|
||||
if not patterns:
|
||||
return collections, list(iterable)
|
||||
|
||||
for pattern in patterns:
|
||||
if isinstance(pattern, string_types):
|
||||
compiled_patterns.append(re.compile(pattern, flags=flags))
|
||||
else:
|
||||
compiled_patterns.append(pattern)
|
||||
|
||||
else:
|
||||
compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))
|
||||
|
||||
# Process iterable.
|
||||
for item in iterable:
|
||||
matched = False
|
||||
|
||||
for pattern in compiled_patterns:
|
||||
for match in pattern.finditer(item):
|
||||
index = match.group('index')
|
||||
|
||||
head = item[:match.start('index')]
|
||||
tail = item[match.end('index'):]
|
||||
|
||||
if not case_sensitive:
|
||||
head = head.lower()
|
||||
tail = tail.lower()
|
||||
|
||||
padding = match.group('padding')
|
||||
if padding:
|
||||
padding = len(index)
|
||||
else:
|
||||
padding = 0
|
||||
|
||||
key = (head, tail, padding)
|
||||
collection_map[key].add(int(index))
|
||||
matched = True
|
||||
|
||||
if not matched:
|
||||
remainder.append(item)
|
||||
|
||||
# Form collections.
|
||||
merge_candidates = []
|
||||
for (head, tail, padding), indexes in collection_map.items():
|
||||
collection = Collection(head, tail, padding, indexes)
|
||||
collections.append(collection)
|
||||
|
||||
if collection.padding == 0:
|
||||
merge_candidates.append(collection)
|
||||
|
||||
# Merge together collections that align on padding boundaries. For example,
|
||||
# 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only
|
||||
# indexes within the padding width limit are merged. If a collection is
|
||||
# entirely merged into another then it will not be included as a separate
|
||||
# collection in the results.
|
||||
fully_merged = []
|
||||
for collection in collections:
|
||||
if collection.padding == 0:
|
||||
continue
|
||||
|
||||
for candidate in merge_candidates:
|
||||
if (candidate.head == collection.head and
|
||||
candidate.tail == collection.tail):
|
||||
|
||||
merged_index_count = 0
|
||||
for index in candidate.indexes:
|
||||
if len(str(abs(index))) == collection.padding:
|
||||
collection.indexes.add(index)
|
||||
merged_index_count += 1
|
||||
|
||||
if merged_index_count == len(candidate.indexes):
|
||||
fully_merged.append(candidate)
|
||||
|
||||
# Filter out fully merged collections.
|
||||
collections = [collection for collection in collections
|
||||
if collection not in fully_merged]
|
||||
|
||||
# Filter out collections that do not have at least as many indexes as
|
||||
# minimum_items. In addition, add any members of a filtered collection,
|
||||
# which are not members of an unfiltered collection, to the remainder.
|
||||
filtered = []
|
||||
remainder_candidates = []
|
||||
for collection in collections:
|
||||
if len(collection.indexes) >= minimum_items:
|
||||
filtered.append(collection)
|
||||
else:
|
||||
for member in collection:
|
||||
remainder_candidates.append(member)
|
||||
|
||||
for candidate in remainder_candidates:
|
||||
# Check if candidate has already been added to remainder to avoid
|
||||
# duplicate entries.
|
||||
if candidate in remainder:
|
||||
continue
|
||||
|
||||
has_membership = False
|
||||
|
||||
for collection in filtered:
|
||||
if candidate in collection:
|
||||
has_membership = True
|
||||
break
|
||||
|
||||
if not has_membership:
|
||||
remainder.append(candidate)
|
||||
|
||||
return filtered, remainder
|
||||
|
||||
|
||||
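A brief usage sketch of assemble() and split(); the file names are invented and the import assumes the vendored package is reachable as pype.vendor.clique.

from pype.vendor import clique

collections, remainder = clique.assemble(
    ["beauty.0001.exr", "beauty.0002.exr", "beauty.0003.exr", "notes.txt"]
)
print(collections)  # [<Collection "beauty.%04d.exr [1-3]">]
print(remainder)    # ['notes.txt']

print(clique.split("myRender.0100.png"))  # ('myRender.', '0100', '.png')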
def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
|
||||
'''Parse *value* into a :py:class:`~clique.collection.Collection`.
|
||||
|
||||
Use *pattern* to extract information from *value*. It may make use of the
|
||||
following keys:
|
||||
|
||||
* *head* - Common leading part of the collection.
|
||||
* *tail* - Common trailing part of the collection.
|
||||
* *padding* - Padding value in ``%0d`` format.
|
||||
* *range* - Total range in the form ``start-end``.
|
||||
* *ranges* - Comma separated ranges of indexes.
|
||||
* *holes* - Comma separated ranges of missing indexes.
|
||||
|
||||
.. note::
|
||||
|
||||
*holes* only makes sense if *range* or *ranges* is also present.
|
||||
|
||||
'''
|
||||
# Construct regular expression for given pattern.
|
||||
expressions = {
|
||||
'head': '(?P<head>.*)',
|
||||
'tail': '(?P<tail>.*)',
|
||||
'padding': '%(?P<padding>\d*)d',
|
||||
'range': '(?P<range>\d+-\d+)?',
|
||||
'ranges': '(?P<ranges>[\d ,\-]+)?',
|
||||
'holes': '(?P<holes>[\d ,\-]+)'
|
||||
}
|
||||
|
||||
pattern_regex = re.escape(pattern)
|
||||
for key, expression in expressions.items():
|
||||
pattern_regex = pattern_regex.replace(
|
||||
'\{{{0}\}}'.format(key),
|
||||
expression
|
||||
)
|
||||
pattern_regex = '^{0}$'.format(pattern_regex)
|
||||
|
||||
# Match pattern against value and use results to construct collection.
|
||||
match = re.search(pattern_regex, value)
|
||||
if match is None:
|
||||
raise ValueError('Value did not match pattern.')
|
||||
|
||||
groups = match.groupdict()
|
||||
if 'padding' in groups and groups['padding']:
|
||||
groups['padding'] = int(groups['padding'])
|
||||
else:
|
||||
groups['padding'] = 0
|
||||
|
||||
# Create collection and then add indexes.
|
||||
collection = Collection(
|
||||
groups.get('head', ''),
|
||||
groups.get('tail', ''),
|
||||
groups['padding']
|
||||
)
|
||||
|
||||
if groups.get('range', None) is not None:
|
||||
start, end = map(int, groups['range'].split('-'))
|
||||
collection.indexes.update(range(start, end + 1))
|
||||
|
||||
if groups.get('ranges', None) is not None:
|
||||
parts = [part.strip() for part in groups['ranges'].split(',')]
|
||||
for part in parts:
|
||||
index_range = list(map(int, part.split('-', 2)))
|
||||
|
||||
if len(index_range) > 1:
|
||||
# Index range.
|
||||
for index in range(index_range[0], index_range[1] + 1):
|
||||
collection.indexes.add(index)
|
||||
else:
|
||||
# Single index.
|
||||
collection.indexes.add(index_range[0])
|
||||
|
||||
if 'holes' in groups:
|
||||
parts = [part.strip() for part in groups['holes'].split(',')]
|
||||
for part in parts:
|
||||
index_range = list(map(int, part.split('-', 2)))
|
||||
|
||||
if len(index_range) > 1:
|
||||
# Index range.
|
||||
for index in range(index_range[0], index_range[1] + 1):
|
||||
collection.indexes.remove(index)
|
||||
else:
|
||||
# Single index.
|
||||
collection.indexes.remove(index_range[0])
|
||||
|
||||
return collection
|
||||
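And a sketch of parse(), which rebuilds a Collection from a formatted string (values invented):

from pype.vendor import clique

collection = clique.parse("render.%04d.exr [1-3, 10]")
print(list(collection))
# ['render.0001.exr', 'render.0002.exr', 'render.0003.exr', 'render.0010.exr']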
pype/vendor/clique/_version.py (vendored, new file, +2 lines)
__version__ = '1.3.1'
|
||||
|
||||
pype/vendor/clique/collection.py (vendored, new file, +383 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
import re
|
||||
|
||||
from . import descriptor, error, sorted_set
|
||||
|
||||
|
||||
class Collection(object):
|
||||
'''Represent group of items that differ only by numerical component.'''
|
||||
|
||||
indexes = descriptor.Unsettable('indexes')
|
||||
|
||||
def __init__(self, head, tail, padding, indexes=None):
|
||||
'''Initialise collection.
|
||||
|
||||
*head* is the leading common part whilst *tail* is the trailing
|
||||
common part.
|
||||
|
||||
*padding* specifies the "width" of the numerical component. An index
|
||||
will be padded with zeros to fill this width. A *padding* of zero
|
||||
implies no padding and width may be any size so long as no leading
|
||||
zeros are present.
|
||||
|
||||
*indexes* can specify a set of numerical indexes to initially populate
|
||||
the collection with.
|
||||
|
||||
.. note::
|
||||
|
||||
After instantiation, the ``indexes`` attribute cannot be set to a
|
||||
new value using assignment::
|
||||
|
||||
>>> collection.indexes = [1, 2, 3]
|
||||
AttributeError: Cannot set attribute defined as unsettable.
|
||||
|
||||
Instead, manipulate it directly::
|
||||
|
||||
>>> collection.indexes.clear()
|
||||
>>> collection.indexes.update([1, 2, 3])
|
||||
|
||||
'''
|
||||
super(Collection, self).__init__()
|
||||
self.__dict__['indexes'] = sorted_set.SortedSet()
|
||||
self._head = head
|
||||
self._tail = tail
|
||||
self.padding = padding
|
||||
self._update_expression()
|
||||
|
||||
if indexes is not None:
|
||||
self.indexes.update(indexes)
|
||||
|
||||
@property
|
||||
def head(self):
|
||||
'''Return common leading part.'''
|
||||
return self._head
|
||||
|
||||
@head.setter
|
||||
def head(self, value):
|
||||
'''Set common leading part to *value*.'''
|
||||
self._head = value
|
||||
self._update_expression()
|
||||
|
||||
@property
|
||||
def tail(self):
|
||||
'''Return common trailing part.'''
|
||||
return self._tail
|
||||
|
||||
@tail.setter
|
||||
def tail(self, value):
|
||||
'''Set common trailing part to *value*.'''
|
||||
self._tail = value
|
||||
self._update_expression()
|
||||
|
||||
def _update_expression(self):
|
||||
'''Update internal expression.'''
|
||||
self._expression = re.compile(
|
||||
'^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
|
||||
.format(re.escape(self.head), re.escape(self.tail))
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return self.format()
|
||||
|
||||
def __repr__(self):
|
||||
'''Return representation.'''
|
||||
return '<{0} "{1}">'.format(self.__class__.__name__, self)
|
||||
|
||||
def __iter__(self):
|
||||
'''Return iterator over items in collection.'''
|
||||
for index in self.indexes:
|
||||
formatted_index = '{0:0{1}d}'.format(index, self.padding)
|
||||
item = '{0}{1}{2}'.format(self.head, formatted_index, self.tail)
|
||||
yield item
|
||||
|
||||
def __contains__(self, item):
|
||||
'''Return whether *item* is present in collection.'''
|
||||
match = self.match(item)
|
||||
if not match:
|
||||
return False
|
||||
|
||||
if not int(match.group('index')) in self.indexes:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __eq__(self, other):
|
||||
'''Return whether *other* collection is equal.'''
|
||||
if not isinstance(other, Collection):
|
||||
return NotImplemented
|
||||
|
||||
return all([
|
||||
other.head == self.head,
|
||||
other.tail == self.tail,
|
||||
other.padding == self.padding,
|
||||
other.indexes == self.indexes
|
||||
])
|
||||
|
||||
def __ne__(self, other):
|
||||
'''Return whether *other* collection is not equal.'''
|
||||
result = self.__eq__(other)
|
||||
if result is NotImplemented:
|
||||
return result
|
||||
|
||||
return not result
|
||||
|
||||
def __gt__(self, other):
|
||||
'''Return whether *other* collection is greater than.'''
|
||||
if not isinstance(other, Collection):
|
||||
return NotImplemented
|
||||
|
||||
a = (self.head, self.tail, self.padding, len(self.indexes))
|
||||
b = (other.head, other.tail, other.padding, len(other.indexes))
|
||||
|
||||
return a > b
|
||||
|
||||
def __lt__(self, other):
|
||||
'''Return whether *other* collection is less than.'''
|
||||
result = self.__gt__(other)
|
||||
if result is NotImplemented:
|
||||
return result
|
||||
|
||||
return not result
|
||||
|
||||
def __ge__(self, other):
|
||||
'''Return whether *other* collection is greater than or equal.'''
|
||||
result = self.__eq__(other)
|
||||
if result is NotImplemented:
|
||||
return result
|
||||
|
||||
if result is False:
|
||||
result = self.__gt__(other)
|
||||
|
||||
return result
|
||||
|
||||
def __le__(self, other):
|
||||
'''Return whether *other* collection is less than or equal.'''
|
||||
result = self.__eq__(other)
|
||||
if result is NotImplemented:
|
||||
return result
|
||||
|
||||
if result is False:
|
||||
result = self.__lt__(other)
|
||||
|
||||
return result
|
||||
|
||||
def match(self, item):
|
||||
'''Return whether *item* matches this collection expression.
|
||||
|
||||
If a match is successful return data about the match otherwise return
|
||||
None.
|
||||
|
||||
'''
|
||||
match = self._expression.match(item)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
index = match.group('index')
|
||||
padded = False
|
||||
if match.group('padding'):
|
||||
padded = True
|
||||
|
||||
if self.padding == 0:
|
||||
if padded:
|
||||
return None
|
||||
|
||||
elif len(index) != self.padding:
|
||||
return None
|
||||
|
||||
return match
|
||||
|
||||
def add(self, item):
|
||||
'''Add *item* to collection.
|
||||
|
||||
raise :py:class:`~error.CollectionError` if *item* cannot be
|
||||
added to the collection.
|
||||
|
||||
'''
|
||||
match = self.match(item)
|
||||
if match is None:
|
||||
raise error.CollectionError(
|
||||
'Item does not match collection expression.'
|
||||
)
|
||||
|
||||
self.indexes.add(int(match.group('index')))
|
||||
|
||||
def remove(self, item):
|
||||
'''Remove *item* from collection.
|
||||
|
||||
raise :py:class:`~error.CollectionError` if *item* cannot be
|
||||
removed from the collection.
|
||||
|
||||
'''
|
||||
match = self.match(item)
|
||||
if match is None:
|
||||
raise error.CollectionError(
|
||||
'Item not present in collection.'
|
||||
)
|
||||
|
||||
index = int(match.group('index'))
|
||||
try:
|
||||
self.indexes.remove(index)
|
||||
except KeyError:
|
||||
raise error.CollectionError(
|
||||
'Item not present in collection.'
|
||||
)
|
||||
|
||||
def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
|
||||
'''Return string representation as specified by *pattern*.
|
||||
|
||||
Pattern can be any format accepted by Python's standard format function
|
||||
and will receive the following keyword arguments as context:
|
||||
|
||||
* *head* - Common leading part of the collection.
|
||||
* *tail* - Common trailing part of the collection.
|
||||
* *padding* - Padding value in ``%0d`` format.
|
||||
* *range* - Total range in the form ``start-end``
|
||||
* *ranges* - Comma separated ranges of indexes.
|
||||
* *holes* - Comma separated ranges of missing indexes.
|
||||
|
||||
'''
|
||||
data = {}
|
||||
data['head'] = self.head
|
||||
data['tail'] = self.tail
|
||||
|
||||
if self.padding:
|
||||
data['padding'] = '%0{0}d'.format(self.padding)
|
||||
else:
|
||||
data['padding'] = '%d'
|
||||
|
||||
if '{holes}' in pattern:
|
||||
data['holes'] = self.holes().format('{ranges}')
|
||||
|
||||
if '{range}' in pattern or '{ranges}' in pattern:
|
||||
indexes = list(self.indexes)
|
||||
indexes_count = len(indexes)
|
||||
|
||||
if indexes_count == 0:
|
||||
data['range'] = ''
|
||||
|
||||
elif indexes_count == 1:
|
||||
data['range'] = '{0}'.format(indexes[0])
|
||||
|
||||
else:
|
||||
data['range'] = '{0}-{1}'.format(
|
||||
indexes[0], indexes[-1]
|
||||
)
|
||||
|
||||
if '{ranges}' in pattern:
|
||||
separated = self.separate()
|
||||
if len(separated) > 1:
|
||||
ranges = [collection.format('{range}')
|
||||
for collection in separated]
|
||||
|
||||
else:
|
||||
ranges = [data['range']]
|
||||
|
||||
data['ranges'] = ', '.join(ranges)
|
||||
|
||||
return pattern.format(**data)
|
||||
|
||||
def is_contiguous(self):
|
||||
'''Return whether entire collection is contiguous.'''
|
||||
previous = None
|
||||
for index in self.indexes:
|
||||
if previous is None:
|
||||
previous = index
|
||||
continue
|
||||
|
||||
if index != (previous + 1):
|
||||
return False
|
||||
|
||||
previous = index
|
||||
|
||||
return True
|
||||
|
||||
def holes(self):
|
||||
'''Return holes in collection.
|
||||
|
||||
Return :py:class:`~clique.collection.Collection` of missing indexes.
|
||||
|
||||
'''
|
||||
missing = set([])
|
||||
previous = None
|
||||
for index in self.indexes:
|
||||
if previous is None:
|
||||
previous = index
|
||||
continue
|
||||
|
||||
if index != (previous + 1):
|
||||
missing.update(range(previous + 1, index))
|
||||
|
||||
previous = index
|
||||
|
||||
return Collection(self.head, self.tail, self.padding, indexes=missing)
|
||||
|
||||
def is_compatible(self, collection):
|
||||
'''Return whether *collection* is compatible with this collection.
|
||||
|
||||
To be compatible *collection* must have the same head, tail and padding
|
||||
properties as this collection.
|
||||
|
||||
'''
|
||||
return all([
|
||||
isinstance(collection, Collection),
|
||||
collection.head == self.head,
|
||||
collection.tail == self.tail,
|
||||
collection.padding == self.padding
|
||||
])
|
||||
|
||||
def merge(self, collection):
|
||||
'''Merge *collection* into this collection.
|
||||
|
||||
If the *collection* is compatible with this collection then update
|
||||
indexes with all indexes in *collection*.
|
||||
|
||||
raise :py:class:`~error.CollectionError` if *collection* is not
|
||||
compatible with this collection.
|
||||
|
||||
'''
|
||||
if not self.is_compatible(collection):
|
||||
raise error.CollectionError('Collection is not compatible '
|
||||
'with this collection.')
|
||||
|
||||
self.indexes.update(collection.indexes)
|
||||
|
||||
def separate(self):
|
||||
'''Return contiguous parts of collection as separate collections.
|
||||
|
||||
Return as list of :py:class:`~clique.collection.Collection` instances.
|
||||
|
||||
'''
|
||||
collections = []
|
||||
start = None
|
||||
end = None
|
||||
|
||||
for index in self.indexes:
|
||||
if start is None:
|
||||
start = index
|
||||
end = start
|
||||
continue
|
||||
|
||||
if index != (end + 1):
|
||||
collections.append(
|
||||
Collection(self.head, self.tail, self.padding,
|
||||
indexes=set(range(start, end + 1)))
|
||||
)
|
||||
start = index
|
||||
|
||||
end = index
|
||||
|
||||
if start is None:
|
||||
collections.append(
|
||||
Collection(self.head, self.tail, self.padding)
|
||||
)
|
||||
else:
|
||||
collections.append(
|
||||
Collection(self.head, self.tail, self.padding,
|
||||
indexes=range(start, end + 1))
|
||||
)
|
||||
|
||||
return collections
|
||||
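A small sketch tying the Collection API above together; the contents are invented and the import assumes the vendored package is reachable as pype.vendor.clique.

from pype.vendor import clique

collection = clique.Collection("file.", ".jpg", 4, indexes=set([1, 2, 3, 7]))
print(collection.format())                    # file.%04d.jpg [1-3, 7]
print(collection.is_contiguous())             # False
print(collection.holes().format("{ranges}"))  # 4-6
print([str(part) for part in collection.separate()])
# ['file.%04d.jpg [1-3]', 'file.%04d.jpg [7]']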
pype/vendor/clique/descriptor.py (vendored, new file, +44 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
|
||||
class Unsettable(object):
|
||||
'''Prevent standard setting of property.
|
||||
|
||||
Example::
|
||||
|
||||
>>> class Foo(object):
|
||||
...
|
||||
... x = Unsettable('x')
|
||||
...
|
||||
... def __init__(self):
|
||||
... self.__dict__['x'] = True
|
||||
...
|
||||
>>> foo = Foo()
|
||||
>>> print foo.x
|
||||
True
|
||||
>>> foo.x = False
|
||||
AttributeError: Cannot set attribute defined as unsettable.
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, label):
|
||||
'''Initialise descriptor with property *label*.
|
||||
|
||||
*label* should match the name of the property being described::
|
||||
|
||||
x = Unsettable('x')
|
||||
|
||||
'''
|
||||
self.label = label
|
||||
super(Unsettable, self).__init__()
|
||||
|
||||
def __get__(self, instance, owner):
|
||||
'''Return value of property for *instance*.'''
|
||||
return instance.__dict__.get(self.label)
|
||||
|
||||
def __set__(self, instance, value):
|
||||
'''Set *value* for *instance* property.'''
|
||||
raise AttributeError('Cannot set attribute defined as unsettable.')
|
||||
|
||||
pype/vendor/clique/error.py (vendored, new file, +10 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
'''Custom error classes.'''
|
||||
|
||||
|
||||
class CollectionError(Exception):
|
||||
'''Raise when a collection error occurs.'''
|
||||
|
||||
pype/vendor/clique/sorted_set.py (vendored, new file, +62 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
import collections
|
||||
import bisect
|
||||
|
||||
|
||||
class SortedSet(collections.MutableSet):
|
||||
'''Maintain sorted collection of unique items.'''
|
||||
|
||||
def __init__(self, iterable=None):
|
||||
'''Initialise with items from *iterable*.'''
|
||||
super(SortedSet, self).__init__()
|
||||
self._members = []
|
||||
if iterable:
|
||||
self.update(iterable)
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return str(self._members)
|
||||
|
||||
def __repr__(self):
|
||||
'''Return representation.'''
|
||||
return '<{0} "{1}">'.format(self.__class__.__name__, self)
|
||||
|
||||
def __contains__(self, item):
|
||||
'''Return whether *item* is present.'''
|
||||
return self._index(item) >= 0
|
||||
|
||||
def __len__(self):
|
||||
'''Return number of items.'''
|
||||
return len(self._members)
|
||||
|
||||
def __iter__(self):
|
||||
'''Return iterator over items.'''
|
||||
return iter(self._members)
|
||||
|
||||
def add(self, item):
|
||||
'''Add *item*.'''
|
||||
if not item in self:
|
||||
index = bisect.bisect_right(self._members, item)
|
||||
self._members.insert(index, item)
|
||||
|
||||
def discard(self, item):
|
||||
'''Remove *item*.'''
|
||||
index = self._index(item)
|
||||
if index >= 0:
|
||||
del self._members[index]
|
||||
|
||||
def update(self, iterable):
|
||||
'''Update items with those from *iterable*.'''
|
||||
for item in iterable:
|
||||
self.add(item)
|
||||
|
||||
def _index(self, item):
|
||||
'''Return index of *item* in member list or -1 if not present.'''
|
||||
index = bisect.bisect_left(self._members, item)
|
||||
if index != len(self) and self._members[index] == item:
|
||||
return index
|
||||
|
||||
return -1
|
||||
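A tiny usage sketch of the sorted set above (values invented; import path assumes the vendored package layout):

from pype.vendor.clique.sorted_set import SortedSet

items = SortedSet([5, 1, 3])
items.add(2)
items.discard(5)
print(list(items))  # [1, 2, 3]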
pype/vendor/ftrack_action_handler/__init__.py (vendored, new file, +4 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2017 ftrack
|
||||
|
||||
from ._version import __version__
|
||||
pype/vendor/ftrack_action_handler/_version.py (vendored, new file, +1 line)
__version__ = '0.1.3'
|
||||
pype/vendor/ftrack_action_handler/action.py (vendored, new file, +233 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2017 ftrack
|
||||
|
||||
import logging
|
||||
|
||||
import ftrack_api
|
||||
|
||||
|
||||
class BaseAction(object):
|
||||
'''Custom Action base class
|
||||
|
||||
`label` a descriptive string identifying your action.
|
||||
|
||||
`variant` To group actions together, give them the same
|
||||
label and specify a unique variant per action.
|
||||
|
||||
`identifier` a unique identifier for your action.
|
||||
|
||||
`description` a verbose descriptive text for your action
|
||||
|
||||
'''
|
||||
label = None
|
||||
variant = None
|
||||
identifier = None
|
||||
description = None
|
||||
|
||||
def __init__(self, session):
|
||||
'''Expects a ftrack_api.Session instance'''
|
||||
|
||||
self.logger = logging.getLogger(
|
||||
'{0}.{1}'.format(__name__, self.__class__.__name__)
|
||||
)
|
||||
|
||||
if self.label is None:
|
||||
raise ValueError(
|
||||
'Action missing label.'
|
||||
)
|
||||
|
||||
elif self.identifier is None:
|
||||
raise ValueError(
|
||||
'Action missing identifier.'
|
||||
)
|
||||
|
||||
self._session = session
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
'''Return current session.'''
|
||||
return self._session
|
||||
|
||||
def register(self):
|
||||
'''Register the action, subscribing to the discover and launch topics.'''
|
||||
self.session.event_hub.subscribe(
|
||||
'topic=ftrack.action.discover', self._discover
|
||||
)
|
||||
|
||||
self.session.event_hub.subscribe(
|
||||
'topic=ftrack.action.launch and data.actionIdentifier={0}'.format(
|
||||
self.identifier
|
||||
),
|
||||
self._launch
|
||||
)
|
||||
|
||||
def _discover(self, event):
|
||||
args = self._translate_event(
|
||||
self.session, event
|
||||
)
|
||||
|
||||
accepts = self.discover(
|
||||
self.session, *args
|
||||
)
|
||||
|
||||
if accepts:
|
||||
return {
|
||||
'items': [{
|
||||
'label': self.label,
|
||||
'variant': self.variant,
|
||||
'description': self.description,
|
||||
'actionIdentifier': self.identifier,
|
||||
|
||||
}]
|
||||
}
|
||||
|
||||
def discover(self, session, entities, event):
|
||||
'''Return true if we can handle the selected entities.
|
||||
|
||||
*session* is a `ftrack_api.Session` instance
|
||||
|
||||
|
||||
*entities* is a list of tuples each containing the entity type and the entity id.
|
||||
If the entity is hierarchical you will always get the entity
|
||||
type TypedContext, once retrieved through a get operation you
|
||||
will have the "real" entity type ie. example Shot, Sequence
|
||||
or Asset Build.
|
||||
|
||||
*event* the unmodified original event
|
||||
|
||||
'''
|
||||
|
||||
return False
|
||||
|
||||
def _translate_event(self, session, event):
|
||||
'''Return *event* translated structure to be used with the API.'''
|
||||
|
||||
_selection = event['data'].get('selection', [])
|
||||
|
||||
_entities = list()
|
||||
for entity in _selection:
|
||||
_entities.append(
|
||||
(
|
||||
self._get_entity_type(entity), entity.get('entityId')
|
||||
)
|
||||
)
|
||||
|
||||
return [
|
||||
_entities,
|
||||
event
|
||||
]
|
||||
|
||||
def _get_entity_type(self, entity):
|
||||
'''Return translated entity type that can be used with the API.'''
|
||||
# Get entity type and make sure it is lower cased. Most places except
|
||||
# the component tab in the Sidebar will use lower case notation.
|
||||
entity_type = entity.get('entityType').replace('_', '').lower()
|
||||
|
||||
for schema in self.session.schemas:
|
||||
alias_for = schema.get('alias_for')
|
||||
|
||||
if (
|
||||
alias_for and isinstance(alias_for, str) and
|
||||
alias_for.lower() == entity_type
|
||||
):
|
||||
return schema['id']
|
||||
|
||||
for schema in self.session.schemas:
|
||||
if schema['id'].lower() == entity_type:
|
||||
return schema['id']
|
||||
|
||||
raise ValueError(
|
||||
'Unable to translate entity type: {0}.'.format(entity_type)
|
||||
)
|
||||
|
||||
def _launch(self, event):
|
||||
args = self._translate_event(
|
||||
self.session, event
|
||||
)
|
||||
|
||||
interface = self._interface(
|
||||
self.session, *args
|
||||
)
|
||||
|
||||
if interface:
|
||||
return interface
|
||||
|
||||
response = self.launch(
|
||||
self.session, *args
|
||||
)
|
||||
|
||||
return self._handle_result(
|
||||
self.session, response, *args
|
||||
)
|
||||
|
||||
def launch(self, session, entities, event):
|
||||
'''Callback method for the custom action.
|
||||
|
||||
return either a bool ( True if successful or False if the action failed )
|
||||
or a dictionary with the keys `message` and `success`, the message should be a
|
||||
string and will be displayed as feedback to the user, success should be a bool,
|
||||
True if successful or False if the action failed.
|
||||
|
||||
*session* is a `ftrack_api.Session` instance
|
||||
|
||||
*entities* is a list of tuples each containing the entity type and the entity id.
|
||||
If the entity is hierarchical you will always get the entity
|
||||
type TypedContext, once retrieved through a get operation you
|
||||
will have the "real" entity type ie. example Shot, Sequence
|
||||
or Asset Build.
|
||||
|
||||
*event* the unmodified original event
|
||||
|
||||
'''
|
||||
raise NotImplementedError()
|
||||
|
||||
def _interface(self, *args):
|
||||
interface = self.interface(*args)
|
||||
|
||||
if interface:
|
||||
return {
|
||||
'items': interface
|
||||
}
|
||||
|
||||
def interface(self, session, entities, event):
|
||||
'''Return an interface if applicable or None
|
||||
|
||||
*session* is a `ftrack_api.Session` instance
|
||||
|
||||
*entities* is a list of tuples each containing the entity type and the entity id.
|
||||
If the entity is hierarchical you will always get the entity
|
||||
type TypedContext, once retrieved through a get operation you
|
||||
will have the "real" entity type ie. example Shot, Sequence
|
||||
or Asset Build.
|
||||
|
||||
*event* the unmodified original event
|
||||
'''
|
||||
return None
|
||||
|
||||
def _handle_result(self, session, result, entities, event):
|
||||
'''Validate the returned result from the action callback'''
|
||||
if isinstance(result, bool):
|
||||
result = {
|
||||
'success': result,
|
||||
'message': (
|
||||
'{0} launched successfully.'.format(
|
||||
self.label
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
elif isinstance(result, dict):
|
||||
for key in ('success', 'message'):
|
||||
if key in result:
|
||||
continue
|
||||
|
||||
raise KeyError(
|
||||
'Missing required key: {0}.'.format(key)
|
||||
)
|
||||
|
||||
else:
|
||||
self.logger.error(
|
||||
'Invalid result type must be bool or dictionary!'
|
||||
)
|
||||
|
||||
return result
|
||||
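A minimal sketch of a concrete action built on this base class; the class name, identifier and behaviour below are illustrative only, and the import path assumes the vendored package layout.

import ftrack_api
from pype.vendor.ftrack_action_handler.action import BaseAction


class HelloAction(BaseAction):
    label = 'Hello'
    variant = 'Say hi'
    identifier = 'com.example.hello'
    description = 'Print the selected entities.'

    def discover(self, session, entities, event):
        # Offer the action whenever something is selected.
        return bool(entities)

    def launch(self, session, entities, event):
        for entity_type, entity_id in entities:
            print(entity_type, entity_id)
        return {'success': True, 'message': 'Said hello.'}


session = ftrack_api.Session()
HelloAction(session).register()
session.event_hub.wait()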
pype/vendor/ftrack_api/__init__.py (vendored, new file, +32 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from ._version import __version__
|
||||
from .session import Session
|
||||
|
||||
|
||||
def mixin(instance, mixin_class, name=None):
|
||||
'''Mixin *mixin_class* to *instance*.
|
||||
|
||||
*name* can be used to specify new class name. If not specified then one will
|
||||
be generated.
|
||||
|
||||
'''
|
||||
if name is None:
|
||||
name = '{0}{1}'.format(
|
||||
instance.__class__.__name__, mixin_class.__name__
|
||||
)
|
||||
|
||||
# Check mixin class not already present in mro in order to avoid consistent
|
||||
# method resolution failure.
|
||||
if mixin_class in instance.__class__.mro():
|
||||
return
|
||||
|
||||
instance.__class__ = type(
|
||||
name,
|
||||
(
|
||||
mixin_class,
|
||||
instance.__class__
|
||||
),
|
||||
{}
|
||||
)
|
||||
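A small sketch of what mixin() does; the classes below are invented for illustration and assume the vendored ftrack_api package is importable.

import ftrack_api


class Greeter(object):
    def greet(self):
        return 'hello from {0}'.format(self.__class__.__name__)


class Thing(object):
    pass


thing = Thing()
ftrack_api.mixin(thing, Greeter)
# The instance's class is now a dynamically created subclass of both.
print(thing.greet())  # hello from ThingGreeter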
pype/vendor/ftrack_api/_centralized_storage_scenario.py (vendored, new file, +658 lines)
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2016 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from builtins import str
|
||||
from builtins import object
|
||||
import logging
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
|
||||
import ftrack_api
|
||||
import ftrack_api.structure.standard as _standard
|
||||
from ftrack_api.logging import LazyLogMessage as L
|
||||
|
||||
|
||||
scenario_name = 'ftrack.centralized-storage'
|
||||
|
||||
|
||||
class ConfigureCentralizedStorageScenario(object):
|
||||
'''Configure a centralized storage scenario.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Instantiate centralized storage scenario.'''
|
||||
self.logger = logging.getLogger(
|
||||
__name__ + '.' + self.__class__.__name__
|
||||
)
|
||||
|
||||
@property
|
||||
def storage_scenario(self):
|
||||
'''Return storage scenario setting.'''
|
||||
return self.session.query(
|
||||
'select value from Setting '
|
||||
'where name is "storage_scenario" and group is "STORAGE"'
|
||||
).one()
|
||||
|
||||
@property
|
||||
def existing_centralized_storage_configuration(self):
|
||||
'''Return existing centralized storage configuration.'''
|
||||
storage_scenario = self.storage_scenario
|
||||
|
||||
try:
|
||||
configuration = json.loads(storage_scenario['value'])
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
if not isinstance(configuration, dict):
|
||||
return None
|
||||
|
||||
if configuration.get('scenario') != scenario_name:
|
||||
return None
|
||||
|
||||
return configuration.get('data', {})
|
||||
|
||||
def _get_confirmation_text(self, configuration):
|
||||
'''Return confirmation text from *configuration*.'''
|
||||
configure_location = configuration.get('configure_location')
|
||||
select_location = configuration.get('select_location')
|
||||
select_mount_point = configuration.get('select_mount_point')
|
||||
|
||||
if configure_location:
|
||||
location_text = str(
|
||||
'A new location will be created:\n\n'
|
||||
'* Label: {location_label}\n'
|
||||
'* Name: {location_name}\n'
|
||||
'* Description: {location_description}\n'
|
||||
).format(**configure_location)
|
||||
else:
|
||||
location = self.session.get(
|
||||
'Location', select_location['location_id']
|
||||
)
|
||||
location_text = (
|
||||
u'You have chosen to use an existing location: {0}'.format(
|
||||
location['label']
|
||||
)
|
||||
)
|
||||
|
||||
mount_points_text = str(
|
||||
'* Linux: {linux}\n'
|
||||
'* OS X: {osx}\n'
|
||||
'* Windows: {windows}\n\n'
|
||||
).format(
|
||||
linux=select_mount_point.get('linux_mount_point') or '*Not set*',
|
||||
osx=select_mount_point.get('osx_mount_point') or '*Not set*',
|
||||
windows=select_mount_point.get('windows_mount_point') or '*Not set*'
|
||||
)
|
||||
|
||||
mount_points_not_set = []
|
||||
|
||||
if not select_mount_point.get('linux_mount_point'):
|
||||
mount_points_not_set.append('Linux')
|
||||
|
||||
if not select_mount_point.get('osx_mount_point'):
|
||||
mount_points_not_set.append('OS X')
|
||||
|
||||
if not select_mount_point.get('windows_mount_point'):
|
||||
mount_points_not_set.append('Windows')
|
||||
|
||||
if mount_points_not_set:
|
||||
mount_points_text += str(
|
||||
'Please be aware that this location will not be working on '
|
||||
'{missing} because the mount points are not set up.'
|
||||
).format(
|
||||
missing=' and '.join(mount_points_not_set)
|
||||
)
|
||||
|
||||
text = str(
|
||||
'#Confirm storage setup#\n\n'
|
||||
'Almost there! Please take a moment to verify the settings you '
|
||||
'are about to save. You can always come back later and update the '
|
||||
'configuration.\n'
|
||||
'##Location##\n\n'
|
||||
'{location}\n'
|
||||
'##Mount points##\n\n'
|
||||
'{mount_points}'
|
||||
).format(
|
||||
location=location_text,
|
||||
mount_points=mount_points_text
|
||||
)
|
||||
|
||||
return text
|
||||
|
||||
def configure_scenario(self, event):
|
||||
'''Configure scenario based on *event* and return form items.'''
|
||||
steps = (
|
||||
'select_scenario',
|
||||
'select_location',
|
||||
'configure_location',
|
||||
'select_structure',
|
||||
'select_mount_point',
|
||||
'confirm_summary',
|
||||
'save_configuration'
|
||||
)
|
||||
|
||||
warning_message = ''
|
||||
values = event['data'].get('values', {})
|
||||
|
||||
# Calculate previous step and the next.
|
||||
previous_step = values.get('step', 'select_scenario')
|
||||
next_step = steps[steps.index(previous_step) + 1]
|
||||
state = 'configuring'
|
||||
|
||||
self.logger.info(L(
|
||||
u'Configuring scenario, previous step: {0}, next step: {1}. '
|
||||
u'Values {2!r}.',
|
||||
previous_step, next_step, values
|
||||
))
|
||||
|
||||
if 'configuration' in values:
|
||||
configuration = values.pop('configuration')
|
||||
else:
|
||||
configuration = {}
|
||||
|
||||
if values:
|
||||
# Update configuration with values from the previous step.
|
||||
configuration[previous_step] = values
|
||||
|
||||
if previous_step == 'select_location':
|
||||
values = configuration['select_location']
|
||||
if values.get('location_id') != 'create_new_location':
|
||||
location_exists = self.session.query(
|
||||
'Location where id is "{0}"'.format(
|
||||
values.get('location_id')
|
||||
)
|
||||
).first()
|
||||
if not location_exists:
|
||||
next_step = 'select_location'
|
||||
warning_message = (
|
||||
'**The selected location does not exist. Please choose '
|
||||
'one from the dropdown or create a new one.**'
|
||||
)
|
||||
|
||||
if next_step == 'select_location':
|
||||
try:
|
||||
location_id = (
|
||||
self.existing_centralized_storage_configuration['location_id']
|
||||
)
|
||||
except (KeyError, TypeError):
|
||||
location_id = None
|
||||
|
||||
options = [{
|
||||
'label': 'Create new location',
|
||||
'value': 'create_new_location'
|
||||
}]
|
||||
for location in self.session.query(
|
||||
'select name, label, description from Location'
|
||||
):
|
||||
if location['name'] not in (
|
||||
'ftrack.origin', 'ftrack.unmanaged', 'ftrack.connect',
|
||||
'ftrack.server', 'ftrack.review'
|
||||
):
|
||||
options.append({
|
||||
'label': u'{label} ({name})'.format(
|
||||
label=location['label'], name=location['name']
|
||||
),
|
||||
'description': location['description'],
|
||||
'value': location['id']
|
||||
})
|
||||
|
||||
warning = ''
|
||||
if location_id is not None:
|
||||
# If there is already a location configured we must make the
|
||||
# user aware that changing the location may be problematic.
|
||||
warning = (
|
||||
'\n\n**Be careful if you switch to another location '
|
||||
'for an existing storage scenario. Components that have '
|
||||
'already been published to the previous location will be '
|
||||
'made unavailable for common use.**'
|
||||
)
|
||||
default_value = location_id
|
||||
elif location_id is None and len(options) == 1:
|
||||
# No location configured and no existing locations to use.
|
||||
default_value = 'create_new_location'
|
||||
else:
|
||||
# There are existing locations to choose from but none of them
|
||||
# are currently active in the centralized storage scenario.
|
||||
default_value = None
|
||||
|
||||
items = [{
|
||||
'type': 'label',
|
||||
'value': (
|
||||
'#Select location#\n'
|
||||
'Choose an already existing location or create a new one '
|
||||
'to represent your centralized storage. {0}'.format(
|
||||
warning
|
||||
)
|
||||
)
|
||||
}, {
|
||||
'type': 'enumerator',
|
||||
'label': 'Location',
|
||||
'name': 'location_id',
|
||||
'value': default_value,
|
||||
'data': options
|
||||
}]
|
||||
|
||||
default_location_name = 'studio.central-storage-location'
|
||||
default_location_label = 'Studio location'
|
||||
default_location_description = (
|
||||
'The studio central location where all components are '
|
||||
'stored.'
|
||||
)
|
||||
|
||||
if previous_step == 'configure_location':
|
||||
configure_location = configuration.get(
|
||||
'configure_location'
|
||||
)
|
||||
|
||||
if configure_location:
|
||||
try:
|
||||
existing_location = self.session.query(
|
||||
u'Location where name is "{0}"'.format(
|
||||
configure_location.get('location_name')
|
||||
)
|
||||
).first()
|
||||
except UnicodeEncodeError:
|
||||
next_step = 'configure_location'
|
||||
warning_message += (
|
||||
'**The location name contains non-ascii characters. '
|
||||
'Please change the name and try again.**'
|
||||
)
|
||||
values = configuration['select_location']
|
||||
else:
|
||||
if existing_location:
|
||||
next_step = 'configure_location'
|
||||
warning_message += (
|
||||
u'**There is already a location named {0}. '
|
||||
u'Please change the name and try again.**'.format(
|
||||
configure_location.get('location_name')
|
||||
)
|
||||
)
|
||||
values = configuration['select_location']
|
||||
|
||||
if (
|
||||
not configure_location.get('location_name') or
|
||||
not configure_location.get('location_label') or
|
||||
not configure_location.get('location_description')
|
||||
):
|
||||
next_step = 'configure_location'
|
||||
warning_message += (
|
||||
'**Location name, label and description cannot '
|
||||
'be empty.**'
|
||||
)
|
||||
values = configuration['select_location']
|
||||
|
||||
if next_step == 'configure_location':
|
||||
# Populate form with previous configuration.
|
||||
default_location_label = configure_location['location_label']
|
||||
default_location_name = configure_location['location_name']
|
||||
default_location_description = (
|
||||
configure_location['location_description']
|
||||
)
|
||||
|
||||
if next_step == 'configure_location':
|
||||
|
||||
if values.get('location_id') == 'create_new_location':
|
||||
# Add options to create a new location.
|
||||
items = [{
|
||||
'type': 'label',
|
||||
'value': (
|
||||
'#Create location#\n'
|
||||
'Here you will create a new location to be used '
|
||||
'with your new Storage scenario. For your '
|
||||
'convenience we have already filled in some default '
|
||||
'values. If this is the first time you are configuring '
|
||||
'a storage scenario in ftrack we recommend that you '
|
||||
'stick with these settings.'
|
||||
)
|
||||
}, {
|
||||
'label': 'Label',
|
||||
'name': 'location_label',
|
||||
'value': default_location_label,
|
||||
'type': 'text'
|
||||
}, {
|
||||
'label': 'Name',
|
||||
'name': 'location_name',
|
||||
'value': default_location_name,
|
||||
'type': 'text'
|
||||
}, {
|
||||
'label': 'Description',
|
||||
'name': 'location_description',
|
||||
'value': default_location_description,
|
||||
'type': 'text'
|
||||
}]
|
||||
|
||||
else:
|
||||
# The user selected an existing location. Move on to next
|
||||
# step.
|
||||
next_step = 'select_mount_point'
|
||||
|
||||
if next_step == 'select_structure':
|
||||
# There is only one structure to choose from, go to next step.
|
||||
next_step = 'select_mount_point'
|
||||
# items = [
|
||||
# {
|
||||
# 'type': 'label',
|
||||
# 'value': (
|
||||
# '#Select structure#\n'
|
||||
# 'Select which structure to use with your location. '
|
||||
# 'The structure is used to generate the filesystem '
|
||||
# 'path for components that are added to this location.'
|
||||
# )
|
||||
# },
|
||||
# {
|
||||
# 'type': 'enumerator',
|
||||
# 'label': 'Structure',
|
||||
# 'name': 'structure_id',
|
||||
# 'value': 'standard',
|
||||
# 'data': [{
|
||||
# 'label': 'Standard',
|
||||
# 'value': 'standard',
|
||||
# 'description': (
|
||||
# 'The Standard structure uses the names in your '
|
||||
# 'project structure to determine the path.'
|
||||
# )
|
||||
# }]
|
||||
# }
|
||||
# ]
|
||||
|
||||
if next_step == 'select_mount_point':
|
||||
try:
|
||||
mount_points = (
|
||||
self.existing_centralized_storage_configuration['accessor']['mount_points']
|
||||
)
|
||||
except (KeyError, TypeError):
|
||||
mount_points = dict()
|
||||
|
||||
items = [
|
||||
{
|
||||
'value': (
|
||||
'#Mount points#\n'
|
||||
'Set mount points for your centralized storage '
|
||||
'location. For the location to work as expected each '
|
||||
'platform that you intend to use must have the '
|
||||
'corresponding mount point set and the storage must '
|
||||
'be accessible. If not set correctly files will not be '
|
||||
'saved or read.'
|
||||
),
|
||||
'type': 'label'
|
||||
}, {
|
||||
'type': 'text',
|
||||
'label': 'Linux',
|
||||
'name': 'linux_mount_point',
|
||||
'empty_text': 'E.g. /usr/mnt/MyStorage ...',
|
||||
'value': mount_points.get('linux', '')
|
||||
}, {
|
||||
'type': 'text',
|
||||
'label': 'OS X',
|
||||
'name': 'osx_mount_point',
|
||||
'empty_text': 'E.g. /Volumes/MyStorage ...',
|
||||
'value': mount_points.get('osx', '')
|
||||
}, {
|
||||
'type': 'text',
|
||||
'label': 'Windows',
|
||||
'name': 'windows_mount_point',
|
||||
'empty_text': 'E.g. \\\\MyStorage ...',
|
||||
'value': mount_points.get('windows', '')
|
||||
}
|
||||
]
|
||||
|
||||
if next_step == 'confirm_summary':
|
||||
items = [{
|
||||
'type': 'label',
|
||||
'value': self._get_confirmation_text(configuration)
|
||||
}]
|
||||
state = 'confirm'
|
||||
|
||||
if next_step == 'save_configuration':
|
||||
mount_points = configuration['select_mount_point']
|
||||
select_location = configuration['select_location']
|
||||
|
||||
if select_location['location_id'] == 'create_new_location':
|
||||
configure_location = configuration['configure_location']
|
||||
location = self.session.create(
|
||||
'Location',
|
||||
{
|
||||
'name': configure_location['location_name'],
|
||||
'label': configure_location['location_label'],
|
||||
'description': (
|
||||
configure_location['location_description']
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
else:
|
||||
location = self.session.query(
|
||||
'Location where id is "{0}"'.format(
|
||||
select_location['location_id']
|
||||
)
|
||||
).one()
|
||||
|
||||
setting_value = json.dumps({
|
||||
'scenario': scenario_name,
|
||||
'data': {
|
||||
'location_id': location['id'],
|
||||
'location_name': location['name'],
|
||||
'accessor': {
|
||||
'mount_points': {
|
||||
'linux': mount_points['linux_mount_point'],
|
||||
'osx': mount_points['osx_mount_point'],
|
||||
'windows': mount_points['windows_mount_point']
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
self.storage_scenario['value'] = setting_value
|
||||
self.session.commit()
|
||||
|
||||
# Broadcast an event that storage scenario has been configured.
|
||||
event = ftrack_api.event.base.Event(
|
||||
topic='ftrack.storage-scenario.configure-done'
|
||||
)
|
||||
self.session.event_hub.publish(event)
|
||||
|
||||
items = [{
|
||||
'type': 'label',
|
||||
'value': (
|
||||
'#Done!#\n'
|
||||
'Your storage scenario is now configured and ready '
|
||||
'to use. **Note that you may have to restart Connect and '
|
||||
'other applications to start using it.**'
|
||||
)
|
||||
}]
|
||||
state = 'done'
|
||||
|
||||
if warning_message:
|
||||
items.insert(0, {
|
||||
'type': 'label',
|
||||
'value': warning_message
|
||||
})
|
||||
|
||||
items.append({
|
||||
'type': 'hidden',
|
||||
'value': configuration,
|
||||
'name': 'configuration'
|
||||
})
|
||||
items.append({
|
||||
'type': 'hidden',
|
||||
'value': next_step,
|
||||
'name': 'step'
|
||||
})
|
||||
|
||||
return {
|
||||
'items': items,
|
||||
'state': state
|
||||
}
|
||||
|
||||
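For reference, the "storage_scenario" setting written by the save step above (and consumed by the activation class further down) is a JSON document shaped roughly like this; the id and mount points below are invented.

storage_scenario_value = {
    "scenario": "ftrack.centralized-storage",
    "data": {
        "location_id": "<location id>",
        "location_name": "studio.central-storage-location",
        "accessor": {
            "mount_points": {
                "linux": "/mnt/studio",
                "osx": "/Volumes/studio",
                "windows": "\\\\studio\\projects"
            }
        }
    }
}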
def discover_centralized_scenario(self, event):
|
||||
'''Return action discover dictionary for *event*.'''
|
||||
return {
|
||||
'id': scenario_name,
|
||||
'name': 'Centralized storage scenario',
|
||||
'description': (
|
||||
'(Recommended) centralized storage scenario where all files '
|
||||
'are kept on a storage that is mounted and available to '
|
||||
'everyone in the studio.'
|
||||
)
|
||||
}
|
||||
|
||||
def register(self, session):
|
||||
'''Subscribe to events on *session*.'''
|
||||
self.session = session
|
||||
|
||||
#: TODO: Move these to a separate function.
|
||||
session.event_hub.subscribe(
|
||||
str(
|
||||
'topic=ftrack.storage-scenario.discover '
|
||||
'and source.user.username="{0}"'
|
||||
).format(
|
||||
session.api_user
|
||||
),
|
||||
self.discover_centralized_scenario
|
||||
)
|
||||
session.event_hub.subscribe(
|
||||
str(
|
||||
'topic=ftrack.storage-scenario.configure '
|
||||
'and data.scenario_id="{0}" '
|
||||
'and source.user.username="{1}"'
|
||||
).format(
|
||||
scenario_name,
|
||||
session.api_user
|
||||
),
|
||||
self.configure_scenario
|
||||
)
|
||||
|
||||
|
||||
class ActivateCentralizedStorageScenario(object):
|
||||
'''Activate a centralized storage scenario.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Instantiate centralized storage scenario.'''
|
||||
self.logger = logging.getLogger(
|
||||
__name__ + '.' + self.__class__.__name__
|
||||
)
|
||||
|
||||
def activate(self, event):
|
||||
'''Activate scenario in *event*.'''
|
||||
storage_scenario = event['data']['storage_scenario']
|
||||
|
||||
try:
|
||||
location_data = storage_scenario['data']
|
||||
location_name = location_data['location_name']
|
||||
location_id = location_data['location_id']
|
||||
mount_points = location_data['accessor']['mount_points']
|
||||
|
||||
except KeyError:
|
||||
error_message = (
|
||||
'Unable to read storage scenario data.'
|
||||
)
|
||||
self.logger.error(L(error_message))
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'Unable to configure location based on scenario.'
|
||||
)
|
||||
|
||||
else:
|
||||
location = self.session.create(
|
||||
'Location',
|
||||
data=dict(
|
||||
name=location_name,
|
||||
id=location_id
|
||||
),
|
||||
reconstructing=True
|
||||
)
|
||||
|
||||
if 'darwin' in sys.platform:
|
||||
prefix = mount_points['osx']
|
||||
elif 'linux' in sys.platform:
|
||||
prefix = mount_points['linux']
|
||||
elif 'win' in sys.platform:
|
||||
prefix = mount_points['windows']
|
||||
else:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
(
|
||||
'Unable to find accessor prefix for platform {0}.'
|
||||
).format(sys.platform)
|
||||
)
|
||||
|
||||
location.accessor = ftrack_api.accessor.disk.DiskAccessor(
|
||||
prefix=prefix
|
||||
)
|
||||
location.structure = _standard.StandardStructure()
|
||||
location.priority = 1
|
||||
self.logger.info(L(
|
||||
u'Storage scenario activated. Configured {0!r} from '
|
||||
u'{1!r}',
|
||||
location, storage_scenario
|
||||
))
|
||||
|
||||
def _verify_startup(self, event):
|
||||
'''Verify the storage scenario configuration.'''
|
||||
storage_scenario = event['data']['storage_scenario']
|
||||
location_data = storage_scenario['data']
|
||||
mount_points = location_data['accessor']['mount_points']
|
||||
|
||||
prefix = None
|
||||
if 'darwin' in sys.platform:
|
||||
prefix = mount_points['osx']
|
||||
elif 'linux' in sys.platform:
|
||||
prefix = mount_points['linux']
|
||||
elif 'win' in sys.platform:
|
||||
prefix = mount_points['windows']
|
||||
|
||||
if not prefix:
|
||||
return (
|
||||
u'The storage scenario has not been configured for your '
|
||||
u'operating system. ftrack may not be able to '
|
||||
u'store and track files correctly.'
|
||||
)
|
||||
|
||||
if not os.path.isdir(prefix):
|
||||
return (
|
||||
str(
|
||||
'The path {0} does not exist. ftrack may not be able to '
|
||||
'store and track files correctly. \n\nIf the storage is '
|
||||
'newly setup you may want to create necessary folder '
|
||||
'structures. If the storage is a network drive you should '
|
||||
'make sure that it is mounted correctly.'
|
||||
).format(prefix)
|
||||
)
|
||||
|
||||
def register(self, session):
|
||||
'''Subscribe to events on *session*.'''
|
||||
self.session = session
|
||||
|
||||
session.event_hub.subscribe(
|
||||
(
|
||||
'topic=ftrack.storage-scenario.activate '
|
||||
'and data.storage_scenario.scenario="{0}"'.format(
|
||||
scenario_name
|
||||
)
|
||||
),
|
||||
self.activate
|
||||
)
|
||||
|
||||
# Listen to verify startup event from ftrack connect to allow responding
|
||||
# with a message if something is not working correctly with this
|
||||
# scenario that the user should be notified about.
|
||||
self.session.event_hub.subscribe(
|
||||
(
|
||||
'topic=ftrack.connect.verify-startup '
|
||||
'and data.storage_scenario.scenario="{0}"'.format(
|
||||
scenario_name
|
||||
)
|
||||
),
|
||||
self._verify_startup
|
||||
)
|
||||
|
||||
def register(session):
|
||||
'''Register storage scenario.'''
|
||||
scenario = ActivateCentralizedStorageScenario()
|
||||
scenario.register(session)
|
||||
|
||||
|
||||
def register_configuration(session):
|
||||
'''Register storage scenario.'''
|
||||
scenario = ConfigureCentralizedStorageScenario()
|
||||
scenario.register(session)
|
||||
pype/vendor/ftrack_api/_python_ntpath.py (vendored, new file, +535 lines)
# pragma: no cover
|
||||
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
|
||||
"""Common pathname manipulations, WindowsNT/95 version.
|
||||
|
||||
Instead of importing this module directly, import os and refer to this
|
||||
module as os.path.
|
||||
"""
|
||||
|
||||
from builtins import zip
|
||||
import os
|
||||
import sys
|
||||
import stat
|
||||
import genericpath
|
||||
import warnings
|
||||
|
||||
from genericpath import *
|
||||
|
||||
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
|
||||
"basename","dirname","commonprefix","getsize","getmtime",
|
||||
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
|
||||
"ismount","walk","expanduser","expandvars","normpath","abspath",
|
||||
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
|
||||
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
|
||||
|
||||
# strings representing various path-related bits and pieces
|
||||
curdir = '.'
|
||||
pardir = '..'
|
||||
extsep = '.'
|
||||
sep = '\\'
|
||||
pathsep = ';'
|
||||
altsep = '/'
|
||||
defpath = '.;C:\\bin'
|
||||
if 'ce' in sys.builtin_module_names:
|
||||
defpath = '\\Windows'
|
||||
elif 'os2' in sys.builtin_module_names:
|
||||
# OS/2 w/ VACPP
|
||||
altsep = '/'
|
||||
devnull = 'nul'
|
||||
|
||||
# Normalize the case of a pathname and map slashes to backslashes.
|
||||
# Other normalizations (such as optimizing '../' away) are not done
|
||||
# (this is done by normpath).
|
||||
|
||||
def normcase(s):
|
||||
"""Normalize case of pathname.
|
||||
|
||||
Makes all characters lowercase and all slashes into backslashes."""
|
||||
return s.replace("/", "\\").lower()
|
||||
|
||||
|
||||
# Return whether a path is absolute.
|
||||
# Trivial in Posix, harder on the Mac or MS-DOS.
|
||||
# For DOS it is absolute if it starts with a slash or backslash (current
|
||||
# volume), or if a pathname after the volume letter and colon / UNC resource
|
||||
# starts with a slash or backslash.
|
||||
|
||||
def isabs(s):
|
||||
"""Test whether a path is absolute"""
|
||||
s = splitdrive(s)[1]
|
||||
return s != '' and s[:1] in '/\\'
|
||||
|
||||
|
||||
# Join two (or more) paths.
|
||||
|
||||
def join(a, *p):
|
||||
"""Join two or more pathname components, inserting "\\" as needed.
|
||||
If any component is an absolute path, all previous path components
|
||||
will be discarded."""
|
||||
path = a
|
||||
for b in p:
|
||||
b_wins = 0 # set to 1 iff b makes path irrelevant
|
||||
if path == "":
|
||||
b_wins = 1
|
||||
|
||||
elif isabs(b):
|
||||
# This probably wipes out path so far. However, it's more
|
||||
# complicated if path begins with a drive letter:
|
||||
# 1. join('c:', '/a') == 'c:/a'
|
||||
# 2. join('c:/', '/a') == 'c:/a'
|
||||
# But
|
||||
# 3. join('c:/a', '/b') == '/b'
|
||||
# 4. join('c:', 'd:/') = 'd:/'
|
||||
# 5. join('c:/', 'd:/') = 'd:/'
|
||||
if path[1:2] != ":" or b[1:2] == ":":
|
||||
# Path doesn't start with a drive letter, or cases 4 and 5.
|
||||
b_wins = 1
|
||||
|
||||
# Else path has a drive letter, and b doesn't but is absolute.
|
||||
elif len(path) > 3 or (len(path) == 3 and
|
||||
path[-1] not in "/\\"):
|
||||
# case 3
|
||||
b_wins = 1
|
||||
|
||||
if b_wins:
|
||||
path = b
|
||||
else:
|
||||
# Join, and ensure there's a separator.
|
||||
assert len(path) > 0
|
||||
if path[-1] in "/\\":
|
||||
if b and b[0] in "/\\":
|
||||
path += b[1:]
|
||||
else:
|
||||
path += b
|
||||
elif path[-1] == ":":
|
||||
path += b
|
||||
elif b:
|
||||
if b[0] in "/\\":
|
||||
path += b
|
||||
else:
|
||||
path += "\\" + b
|
||||
else:
|
||||
# path is not empty and does not end with a backslash,
|
||||
# but b is empty; since, e.g., split('a/') produces
|
||||
# ('a', ''), it's best if join() adds a backslash in
|
||||
# this case.
|
||||
path += '\\'
|
||||
|
||||
return path
|
||||
|
||||
|
||||
# Split a path in a drive specification (a drive letter followed by a
|
||||
# colon) and the path specification.
|
||||
# It is always true that drivespec + pathspec == p
|
||||
def splitdrive(p):
|
||||
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
|
||||
"(drive,path)"; either part may be empty"""
|
||||
if p[1:2] == ':':
|
||||
return p[0:2], p[2:]
|
||||
return '', p
|
||||
|
||||
|
||||
# Parse UNC paths
|
||||
def splitunc(p):
|
||||
"""Split a pathname into UNC mount point and relative path specifiers.
|
||||
|
||||
Return a 2-tuple (unc, rest); either part may be empty.
|
||||
If unc is not empty, it has the form '//host/mount' (or similar
|
||||
using backslashes). unc+rest is always the input path.
|
||||
Paths containing drive letters never have an UNC part.
|
||||
"""
|
||||
if p[1:2] == ':':
|
||||
return '', p # Drive letter present
|
||||
firstTwo = p[0:2]
|
||||
if firstTwo == '//' or firstTwo == '\\\\':
|
||||
# is a UNC path:
|
||||
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
|
||||
# \\machine\mountpoint\directories...
|
||||
# directory ^^^^^^^^^^^^^^^
|
||||
normp = normcase(p)
|
||||
index = normp.find('\\', 2)
|
||||
if index == -1:
|
||||
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
|
||||
return ("", p)
|
||||
index = normp.find('\\', index + 1)
|
||||
if index == -1:
|
||||
index = len(p)
|
||||
return p[:index], p[index:]
|
||||
return '', p
|
||||
|
||||
|
||||
# Split a path in head (everything up to the last '/') and tail (the
|
||||
# rest). After the trailing '/' is stripped, the invariant
|
||||
# join(head, tail) == p holds.
|
||||
# The resulting head won't end in '/' unless it is the root.
|
||||
|
||||
def split(p):
|
||||
"""Split a pathname.
|
||||
|
||||
Return tuple (head, tail) where tail is everything after the final slash.
|
||||
Either part may be empty."""
|
||||
|
||||
d, p = splitdrive(p)
|
||||
# set i to index beyond p's last slash
|
||||
i = len(p)
|
||||
while i and p[i-1] not in '/\\':
|
||||
i = i - 1
|
||||
head, tail = p[:i], p[i:] # now tail has no slashes
|
||||
# remove trailing slashes from head, unless it's all slashes
|
||||
head2 = head
|
||||
while head2 and head2[-1] in '/\\':
|
||||
head2 = head2[:-1]
|
||||
head = head2 or head
|
||||
return d + head, tail
|
||||
|
||||
|
||||
# Split a path in root and extension.
|
||||
# The extension is everything starting at the last dot in the last
|
||||
# pathname component; the root is everything before that.
|
||||
# It is always true that root + ext == p.
|
||||
|
||||
def splitext(p):
|
||||
return genericpath._splitext(p, sep, altsep, extsep)
|
||||
splitext.__doc__ = genericpath._splitext.__doc__
|
||||
|
||||
|
||||
# Return the tail (basename) part of a path.
|
||||
|
||||
def basename(p):
|
||||
"""Returns the final component of a pathname"""
|
||||
return split(p)[1]
|
||||
|
||||
|
||||
# Return the head (dirname) part of a path.
|
||||
|
||||
def dirname(p):
|
||||
"""Returns the directory component of a pathname"""
|
||||
return split(p)[0]
|
||||
|
||||
# Is a path a symbolic link?
|
||||
# This will always return false on systems where posix.lstat doesn't exist.
|
||||
|
||||
def islink(path):
|
||||
"""Test for symbolic link.
|
||||
On WindowsNT/95 and OS/2 always returns false
|
||||
"""
|
||||
return False
|
||||
|
||||
# alias exists to lexists
|
||||
lexists = exists
|
||||
|
||||
# Is a path a mount point? Either a root (with or without drive letter)
|
||||
# or an UNC path with at most a / or \ after the mount point.
|
||||
|
||||
def ismount(path):
|
||||
"""Test whether a path is a mount point (defined as root of drive)"""
|
||||
unc, rest = splitunc(path)
|
||||
if unc:
|
||||
return rest in ("", "/", "\\")
|
||||
p = splitdrive(path)[1]
|
||||
return len(p) == 1 and p[0] in '/\\'
|
||||
|
||||
|
||||
# Directory tree walk.
|
||||
# For each directory under top (including top itself, but excluding
|
||||
# '.' and '..'), func(arg, dirname, filenames) is called, where
|
||||
# dirname is the name of the directory and filenames is the list
|
||||
# of files (and subdirectories etc.) in the directory.
|
||||
# The func may modify the filenames list, to implement a filter,
|
||||
# or to impose a different order of visiting.
|
||||
|
||||
def walk(top, func, arg):
|
||||
"""Directory tree walk with callback function.
|
||||
|
||||
For each directory in the directory tree rooted at top (including top
|
||||
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
|
||||
dirname is the name of the directory, and fnames a list of the names of
|
||||
the files and subdirectories in dirname (excluding '.' and '..'). func
|
||||
may modify the fnames list in-place (e.g. via del or slice assignment),
|
||||
and walk will only recurse into the subdirectories whose names remain in
|
||||
fnames; this can be used to implement a filter, or to impose a specific
|
||||
order of visiting. No semantics are defined for, or required of, arg,
|
||||
beyond that arg is always passed to func. It can be used, e.g., to pass
|
||||
a filename pattern, or a mutable object designed to accumulate
|
||||
statistics. Passing None for arg is common."""
|
||||
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
|
||||
stacklevel=2)
|
||||
try:
|
||||
names = os.listdir(top)
|
||||
except os.error:
|
||||
return
|
||||
func(arg, top, names)
|
||||
for name in names:
|
||||
name = join(top, name)
|
||||
if isdir(name):
|
||||
walk(name, func, arg)
|
||||
|
||||
|
||||
# Expand paths beginning with '~' or '~user'.
|
||||
# '~' means $HOME; '~user' means that user's home directory.
|
||||
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
|
||||
# the path is returned unchanged (leaving error reporting to whatever
|
||||
# function is called with the expanded path as argument).
|
||||
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
|
||||
# (A function should also be defined to do full *sh-style environment
|
||||
# variable expansion.)
|
||||
|
||||
def expanduser(path):
|
||||
"""Expand ~ and ~user constructs.
|
||||
|
||||
If user or $HOME is unknown, do nothing."""
|
||||
if path[:1] != '~':
|
||||
return path
|
||||
i, n = 1, len(path)
|
||||
while i < n and path[i] not in '/\\':
|
||||
i = i + 1
|
||||
|
||||
if 'HOME' in os.environ:
|
||||
userhome = os.environ['HOME']
|
||||
elif 'USERPROFILE' in os.environ:
|
||||
userhome = os.environ['USERPROFILE']
|
||||
elif not 'HOMEPATH' in os.environ:
|
||||
return path
|
||||
else:
|
||||
try:
|
||||
drive = os.environ['HOMEDRIVE']
|
||||
except KeyError:
|
||||
drive = ''
|
||||
userhome = join(drive, os.environ['HOMEPATH'])
|
||||
|
||||
if i != 1: #~user
|
||||
userhome = join(dirname(userhome), path[1:i])
|
||||
|
||||
return userhome + path[i:]
|
||||
|
||||
|
||||
# Expand paths containing shell variable substitutions.
|
||||
# The following rules apply:
|
||||
# - no expansion within single quotes
|
||||
# - '$$' is translated into '$'
|
||||
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
|
||||
# - ${varname} is accepted.
|
||||
# - $varname is accepted.
|
||||
# - %varname% is accepted.
|
||||
# - varnames can be made out of letters, digits and the characters '_-'
|
||||
# (though is not verified in the ${varname} and %varname% cases)
|
||||
# XXX With COMMAND.COM you can use any characters in a variable name,
|
||||
# XXX except '^|<>='.
|
||||
|
||||
def expandvars(path):
|
||||
"""Expand shell variables of the forms $var, ${var} and %var%.
|
||||
|
||||
Unknown variables are left unchanged."""
|
||||
if '$' not in path and '%' not in path:
|
||||
return path
|
||||
import string
|
||||
varchars = string.ascii_letters + string.digits + '_-'
|
||||
res = ''
|
||||
index = 0
|
||||
pathlen = len(path)
|
||||
while index < pathlen:
|
||||
c = path[index]
|
||||
if c == '\'': # no expansion within single quotes
|
||||
path = path[index + 1:]
|
||||
pathlen = len(path)
|
||||
try:
|
||||
index = path.index('\'')
|
||||
res = res + '\'' + path[:index + 1]
|
||||
except ValueError:
|
||||
res = res + path
|
||||
index = pathlen - 1
|
||||
elif c == '%': # variable or '%'
|
||||
if path[index + 1:index + 2] == '%':
|
||||
res = res + c
|
||||
index = index + 1
|
||||
else:
|
||||
path = path[index+1:]
|
||||
pathlen = len(path)
|
||||
try:
|
||||
index = path.index('%')
|
||||
except ValueError:
|
||||
res = res + '%' + path
|
||||
index = pathlen - 1
|
||||
else:
|
||||
var = path[:index]
|
||||
if var in os.environ:
|
||||
res = res + os.environ[var]
|
||||
else:
|
||||
res = res + '%' + var + '%'
|
||||
elif c == '$': # variable or '$$'
|
||||
if path[index + 1:index + 2] == '$':
|
||||
res = res + c
|
||||
index = index + 1
|
||||
elif path[index + 1:index + 2] == '{':
|
||||
path = path[index+2:]
|
||||
pathlen = len(path)
|
||||
try:
|
||||
index = path.index('}')
|
||||
var = path[:index]
|
||||
if var in os.environ:
|
||||
res = res + os.environ[var]
|
||||
else:
|
||||
res = res + '${' + var + '}'
|
||||
except ValueError:
|
||||
res = res + '${' + path
|
||||
index = pathlen - 1
|
||||
else:
|
||||
var = ''
|
||||
index = index + 1
|
||||
c = path[index:index + 1]
|
||||
while c != '' and c in varchars:
|
||||
var = var + c
|
||||
index = index + 1
|
||||
c = path[index:index + 1]
|
||||
if var in os.environ:
|
||||
res = res + os.environ[var]
|
||||
else:
|
||||
res = res + '$' + var
|
||||
if c != '':
|
||||
index = index - 1
|
||||
else:
|
||||
res = res + c
|
||||
index = index + 1
|
||||
return res
|
||||
|
||||
|
||||
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
|
||||
# Previously, this function also truncated pathnames to 8+3 format,
|
||||
# but as this module is called "ntpath", that's obviously wrong!
|
||||
|
||||
def normpath(path):
|
||||
"""Normalize path, eliminating double slashes, etc."""
|
||||
# Preserve unicode (if path is unicode)
|
||||
backslash, dot = (u'\\', u'.') if isinstance(path, str) else ('\\', '.')
|
||||
if path.startswith(('\\\\.\\', '\\\\?\\')):
|
||||
# in the case of paths with these prefixes:
|
||||
# \\.\ -> device names
|
||||
# \\?\ -> literal paths
|
||||
# do not do any normalization, but return the path unchanged
|
||||
return path
|
||||
path = path.replace("/", "\\")
|
||||
prefix, path = splitdrive(path)
|
||||
# We need to be careful here. If the prefix is empty, and the path starts
|
||||
# with a backslash, it could either be an absolute path on the current
|
||||
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
|
||||
# is therefore imperative NOT to collapse multiple backslashes blindly in
|
||||
# that case.
|
||||
# The code below preserves multiple backslashes when there is no drive
|
||||
# letter. This means that the invalid filename \\\a\b is preserved
|
||||
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
|
||||
# is any better behaviour for such edge cases.
|
||||
if prefix == '':
|
||||
# No drive letter - preserve initial backslashes
|
||||
while path[:1] == "\\":
|
||||
prefix = prefix + backslash
|
||||
path = path[1:]
|
||||
else:
|
||||
# We have a drive letter - collapse initial backslashes
|
||||
if path.startswith("\\"):
|
||||
prefix = prefix + backslash
|
||||
path = path.lstrip("\\")
|
||||
comps = path.split("\\")
|
||||
i = 0
|
||||
while i < len(comps):
|
||||
if comps[i] in ('.', ''):
|
||||
del comps[i]
|
||||
elif comps[i] == '..':
|
||||
if i > 0 and comps[i-1] != '..':
|
||||
del comps[i-1:i+1]
|
||||
i -= 1
|
||||
elif i == 0 and prefix.endswith("\\"):
|
||||
del comps[i]
|
||||
else:
|
||||
i += 1
|
||||
else:
|
||||
i += 1
|
||||
# If the path is now empty, substitute '.'
|
||||
if not prefix and not comps:
|
||||
comps.append(dot)
|
||||
return prefix + backslash.join(comps)
|
||||
|
||||
|
||||
# Return an absolute path.
|
||||
try:
|
||||
from nt import _getfullpathname
|
||||
|
||||
except ImportError: # not running on Windows - mock up something sensible
|
||||
def abspath(path):
|
||||
"""Return the absolute version of a path."""
|
||||
if not isabs(path):
|
||||
if isinstance(path, str):
|
||||
cwd = os.getcwd()
|
||||
else:
|
||||
cwd = os.getcwd()
|
||||
path = join(cwd, path)
|
||||
return normpath(path)
|
||||
|
||||
else: # use native Windows method on Windows
|
||||
def abspath(path):
|
||||
"""Return the absolute version of a path."""
|
||||
|
||||
if path: # Empty path must return current working directory.
|
||||
try:
|
||||
path = _getfullpathname(path)
|
||||
except WindowsError:
|
||||
pass # Bad path - return unchanged.
|
||||
elif isinstance(path, str):
|
||||
path = os.getcwd()
|
||||
else:
|
||||
path = os.getcwd()
|
||||
return normpath(path)
|
||||
|
||||
# realpath is a no-op on systems without islink support
|
||||
realpath = abspath
|
||||
# Win9x family and earlier have no Unicode filename support.
|
||||
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
|
||||
sys.getwindowsversion()[3] >= 2)
|
||||
|
||||
def _abspath_split(path):
|
||||
abs = abspath(normpath(path))
|
||||
prefix, rest = splitunc(abs)
|
||||
is_unc = bool(prefix)
|
||||
if not is_unc:
|
||||
prefix, rest = splitdrive(abs)
|
||||
return is_unc, prefix, [x for x in rest.split(sep) if x]
|
||||
|
||||
def relpath(path, start=curdir):
|
||||
"""Return a relative version of a path"""
|
||||
|
||||
if not path:
|
||||
raise ValueError("no path specified")
|
||||
|
||||
start_is_unc, start_prefix, start_list = _abspath_split(start)
|
||||
path_is_unc, path_prefix, path_list = _abspath_split(path)
|
||||
|
||||
if path_is_unc ^ start_is_unc:
|
||||
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
|
||||
% (path, start))
|
||||
if path_prefix.lower() != start_prefix.lower():
|
||||
if path_is_unc:
|
||||
raise ValueError("path is on UNC root %s, start on UNC root %s"
|
||||
% (path_prefix, start_prefix))
|
||||
else:
|
||||
raise ValueError("path is on drive %s, start on drive %s"
|
||||
% (path_prefix, start_prefix))
|
||||
# Work out how much of the filepath is shared by start and path.
|
||||
i = 0
|
||||
for e1, e2 in zip(start_list, path_list):
|
||||
if e1.lower() != e2.lower():
|
||||
break
|
||||
i += 1
|
||||
|
||||
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
|
||||
if not rel_list:
|
||||
return curdir
|
||||
return join(*rel_list)
|
||||
|
||||
try:
|
||||
# The genericpath.isdir implementation uses os.stat and checks the mode
|
||||
# attribute to tell whether or not the path is a directory.
|
||||
# This is overkill on Windows - just pass the path to GetFileAttributes
|
||||
# and check the attribute from there.
|
||||
from nt import _isdir as isdir
|
||||
except ImportError:
|
||||
# Use genericpath.isdir as imported above.
|
||||
pass
|
||||
1 pype/vendor/ftrack_api/_version.py (vendored, new file)
@@ -0,0 +1 @@
__version__ = '1.6.0'
2 pype/vendor/ftrack_api/accessor/__init__.py (vendored, new file)
@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
124 pype/vendor/ftrack_api/accessor/base.py (vendored, new file)
@@ -0,0 +1,124 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 ftrack

from builtins import object
import abc

import ftrack_api.exception
from future.utils import with_metaclass


class Accessor(with_metaclass(abc.ABCMeta, object)):
    '''Provide data access to a location.

    A location represents a specific storage, but access to that storage may
    vary. For example, both local filesystem and FTP access may be possible for
    the same storage. An accessor implements these different ways of accessing
    the same data location.

    As different accessors may access the same location, only part of a data
    path that is commonly understood may be stored in the database. The format
    of this path should be a contract between the accessors that require access
    to the same location and is left as an implementation detail. As such, this
    system provides no guarantee that two different accessors can provide access
    to the same location, though this is a clear goal. The path stored centrally
    is referred to as the **resource identifier** and should be used when
    calling any of the accessor methods that accept a *resource_identifier*
    argument.

    '''

    def __init__(self):
        '''Initialise location accessor.'''
        super(Accessor, self).__init__()

    @abc.abstractmethod
    def list(self, resource_identifier):
        '''Return list of entries in *resource_identifier* container.

        Each entry in the returned list should be a valid resource identifier.

        Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
        *resource_identifier* does not exist or
        :exc:`~ftrack_api.exception.AccessorResourceInvalidError` if
        *resource_identifier* is not a container.

        '''

    @abc.abstractmethod
    def exists(self, resource_identifier):
        '''Return if *resource_identifier* is valid and exists in location.'''

    @abc.abstractmethod
    def is_file(self, resource_identifier):
        '''Return whether *resource_identifier* refers to a file.'''

    @abc.abstractmethod
    def is_container(self, resource_identifier):
        '''Return whether *resource_identifier* refers to a container.'''

    @abc.abstractmethod
    def is_sequence(self, resource_identifier):
        '''Return whether *resource_identifier* refers to a file sequence.'''

    @abc.abstractmethod
    def open(self, resource_identifier, mode='rb'):
        '''Return :class:`~ftrack_api.data.Data` for *resource_identifier*.'''

    @abc.abstractmethod
    def remove(self, resource_identifier):
        '''Remove *resource_identifier*.

        Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
        *resource_identifier* does not exist.

        '''

    @abc.abstractmethod
    def make_container(self, resource_identifier, recursive=True):
        '''Make a container at *resource_identifier*.

        If *recursive* is True, also make any intermediate containers.

        Should silently ignore existing containers and not recreate them.

        '''

    @abc.abstractmethod
    def get_container(self, resource_identifier):
        '''Return resource_identifier of container for *resource_identifier*.

        Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError`
        if container of *resource_identifier* could not be determined.

        '''

    def remove_container(self, resource_identifier): # pragma: no cover
        '''Remove container at *resource_identifier*.'''
        return self.remove(resource_identifier)

    def get_filesystem_path(self, resource_identifier): # pragma: no cover
        '''Return filesystem path for *resource_identifier*.

        Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
        filesystem path could not be determined from *resource_identifier* or
        :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
        retrieving filesystem paths is not supported by this accessor.

        '''
        raise ftrack_api.exception.AccessorUnsupportedOperationError(
            'get_filesystem_path', resource_identifier=resource_identifier
        )

    def get_url(self, resource_identifier):
        '''Return URL for *resource_identifier*.

        Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
        URL could not be determined from *resource_identifier* or
        :exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
        retrieving URL is not supported by this accessor.

        '''
        raise ftrack_api.exception.AccessorUnsupportedOperationError(
            'get_url', resource_identifier=resource_identifier
        )
251 pype/vendor/ftrack_api/accessor/disk.py (vendored, new file)
@@ -0,0 +1,251 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 ftrack
|
||||
|
||||
import os
|
||||
import sys
|
||||
import errno
|
||||
import contextlib
|
||||
|
||||
import ftrack_api._python_ntpath as ntpath
|
||||
import ftrack_api.accessor.base
|
||||
import ftrack_api.data
|
||||
from ftrack_api.exception import (
|
||||
AccessorFilesystemPathError,
|
||||
AccessorUnsupportedOperationError,
|
||||
AccessorResourceNotFoundError,
|
||||
AccessorOperationFailedError,
|
||||
AccessorPermissionDeniedError,
|
||||
AccessorResourceInvalidError,
|
||||
AccessorContainerNotEmptyError,
|
||||
AccessorParentResourceNotFoundError
|
||||
)
|
||||
|
||||
|
||||
class DiskAccessor(ftrack_api.accessor.base.Accessor):
|
||||
'''Provide disk access to a location.
|
||||
|
||||
Expect resource identifiers to refer to relative filesystem paths.
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, prefix, **kw):
|
||||
'''Initialise location accessor.
|
||||
|
||||
*prefix* specifies the base folder for the disk based structure and
|
||||
will be prepended to any path. It should be specified in the syntax of
|
||||
the current OS.
|
||||
|
||||
'''
|
||||
if prefix:
|
||||
prefix = os.path.expanduser(os.path.expandvars(prefix))
|
||||
prefix = os.path.abspath(prefix)
|
||||
self.prefix = prefix
|
||||
|
||||
super(DiskAccessor, self).__init__(**kw)
|
||||
|
||||
def list(self, resource_identifier):
|
||||
'''Return list of entries in *resource_identifier* container.
|
||||
|
||||
Each entry in the returned list should be a valid resource identifier.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
|
||||
*resource_identifier* does not exist or
|
||||
:exc:`~ftrack_api.exception.AccessorResourceInvalidError` if
|
||||
*resource_identifier* is not a container.
|
||||
|
||||
'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
|
||||
with error_handler(
|
||||
operation='list', resource_identifier=resource_identifier
|
||||
):
|
||||
listing = []
|
||||
for entry in os.listdir(filesystem_path):
|
||||
listing.append(os.path.join(resource_identifier, entry))
|
||||
|
||||
return listing
|
||||
|
||||
def exists(self, resource_identifier):
|
||||
'''Return if *resource_identifier* is valid and exists in location.'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
return os.path.exists(filesystem_path)
|
||||
|
||||
def is_file(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a file.'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
return os.path.isfile(filesystem_path)
|
||||
|
||||
def is_container(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a container.'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
return os.path.isdir(filesystem_path)
|
||||
|
||||
def is_sequence(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a file sequence.'''
|
||||
raise AccessorUnsupportedOperationError(operation='is_sequence')
|
||||
|
||||
def open(self, resource_identifier, mode='rb'):
|
||||
'''Return :class:`~ftrack_api.Data` for *resource_identifier*.'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
|
||||
with error_handler(
|
||||
operation='open', resource_identifier=resource_identifier
|
||||
):
|
||||
data = ftrack_api.data.File(filesystem_path, mode)
|
||||
|
||||
return data
|
||||
|
||||
def remove(self, resource_identifier):
|
||||
'''Remove *resource_identifier*.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.AccessorResourceNotFoundError` if
|
||||
*resource_identifier* does not exist.
|
||||
|
||||
'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
|
||||
if self.is_file(resource_identifier):
|
||||
with error_handler(
|
||||
operation='remove', resource_identifier=resource_identifier
|
||||
):
|
||||
os.remove(filesystem_path)
|
||||
|
||||
elif self.is_container(resource_identifier):
|
||||
with error_handler(
|
||||
operation='remove', resource_identifier=resource_identifier
|
||||
):
|
||||
os.rmdir(filesystem_path)
|
||||
|
||||
else:
|
||||
raise AccessorResourceNotFoundError(
|
||||
resource_identifier=resource_identifier
|
||||
)
|
||||
|
||||
def make_container(self, resource_identifier, recursive=True):
|
||||
'''Make a container at *resource_identifier*.
|
||||
|
||||
If *recursive* is True, also make any intermediate containers.
|
||||
|
||||
'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
|
||||
with error_handler(
|
||||
operation='makeContainer', resource_identifier=resource_identifier
|
||||
):
|
||||
try:
|
||||
if recursive:
|
||||
os.makedirs(filesystem_path)
|
||||
else:
|
||||
try:
|
||||
os.mkdir(filesystem_path)
|
||||
except OSError as error:
|
||||
if error.errno == errno.ENOENT:
|
||||
raise AccessorParentResourceNotFoundError(
|
||||
resource_identifier=resource_identifier
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
except OSError as error:
|
||||
if error.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
def get_container(self, resource_identifier):
|
||||
'''Return resource_identifier of container for *resource_identifier*.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.AccessorParentResourceNotFoundError` if
|
||||
container of *resource_identifier* could not be determined.
|
||||
|
||||
'''
|
||||
filesystem_path = self.get_filesystem_path(resource_identifier)
|
||||
|
||||
container = os.path.dirname(filesystem_path)
|
||||
|
||||
if self.prefix:
|
||||
if not container.startswith(self.prefix):
|
||||
raise AccessorParentResourceNotFoundError(
|
||||
resource_identifier=resource_identifier,
|
||||
message='Could not determine container for '
|
||||
'{resource_identifier} as container falls outside '
|
||||
'of configured prefix.'
|
||||
)
|
||||
|
||||
# Convert container filesystem path into resource identifier.
|
||||
container = container[len(self.prefix):]
|
||||
if ntpath.isabs(container):
|
||||
# Ensure that resulting path is relative by stripping any
|
||||
# leftover prefixed slashes from string.
|
||||
# E.g. If prefix was '/tmp' and path was '/tmp/foo/bar' the
|
||||
# result will be 'foo/bar'.
|
||||
container = container.lstrip('\\/')
|
||||
|
||||
return container
|
||||
|
||||
def get_filesystem_path(self, resource_identifier):
|
||||
'''Return filesystem path for *resource_identifier*.
|
||||
|
||||
For example::
|
||||
|
||||
>>> accessor = DiskAccessor('my.location', '/mountpoint')
|
||||
>>> print accessor.get_filesystem_path('test.txt')
|
||||
/mountpoint/test.txt
|
||||
>>> print accessor.get_filesystem_path('/mountpoint/test.txt')
|
||||
/mountpoint/test.txt
|
||||
|
||||
Raise :exc:`ftrack_api.exception.AccessorFilesystemPathError` if filesystem
|
||||
path could not be determined from *resource_identifier*.
|
||||
|
||||
'''
|
||||
filesystem_path = resource_identifier
|
||||
if filesystem_path:
|
||||
filesystem_path = os.path.normpath(filesystem_path)
|
||||
|
||||
if self.prefix:
|
||||
if not os.path.isabs(filesystem_path):
|
||||
filesystem_path = os.path.normpath(
|
||||
os.path.join(self.prefix, filesystem_path)
|
||||
)
|
||||
|
||||
if not filesystem_path.startswith(self.prefix):
|
||||
raise AccessorFilesystemPathError(
|
||||
resource_identifier=resource_identifier,
|
||||
message='Could not determine access path for '
|
||||
'resource_identifier outside of configured prefix: '
|
||||
'{resource_identifier}.'
|
||||
)
|
||||
|
||||
return filesystem_path
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def error_handler(**kw):
|
||||
'''Conform raised OSError/IOError exception to appropriate FTrack error.'''
|
||||
try:
|
||||
yield
|
||||
|
||||
except (OSError, IOError) as error:
|
||||
(exception_type, exception_value, traceback) = sys.exc_info()
|
||||
kw.setdefault('error', error)
|
||||
|
||||
|
||||
error_code = getattr(error, 'errno')
|
||||
if not error_code:
|
||||
raise AccessorOperationFailedError(**kw)
|
||||
|
||||
if error_code == errno.ENOENT:
|
||||
raise AccessorResourceNotFoundError(**kw)
|
||||
|
||||
elif error_code == errno.EPERM:
|
||||
raise AccessorPermissionDeniedError(**kw)
|
||||
|
||||
elif error_code == errno.ENOTEMPTY:
|
||||
raise AccessorContainerNotEmptyError(**kw)
|
||||
|
||||
elif error_code in (errno.ENOTDIR, errno.EISDIR, errno.EINVAL):
|
||||
raise AccessorResourceInvalidError(**kw)
|
||||
|
||||
else:
|
||||
raise AccessorOperationFailedError(**kw)
|
||||
|
||||
except Exception:
|
||||
raise
|
||||
240 pype/vendor/ftrack_api/accessor/server.py (vendored, new file)
@@ -0,0 +1,240 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
import base64
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
from .base import Accessor
|
||||
from ..data import String
|
||||
import ftrack_api.exception
|
||||
import ftrack_api.symbol
|
||||
|
||||
|
||||
class ServerFile(String):
|
||||
'''Representation of a server file.'''
|
||||
|
||||
def __init__(self, resource_identifier, session, mode='rb'):
|
||||
'''Initialise file.'''
|
||||
self.mode = mode
|
||||
self.resource_identifier = resource_identifier
|
||||
self._session = session
|
||||
self._has_read = False
|
||||
|
||||
super(ServerFile, self).__init__()
|
||||
|
||||
def flush(self):
|
||||
'''Flush all changes.'''
|
||||
super(ServerFile, self).flush()
|
||||
|
||||
if self.mode == 'wb':
|
||||
self._write()
|
||||
|
||||
def read(self, limit=None):
|
||||
'''Read file.'''
|
||||
if not self._has_read:
|
||||
self._read()
|
||||
self._has_read = True
|
||||
|
||||
return super(ServerFile, self).read(limit)
|
||||
|
||||
def _read(self):
|
||||
'''Read all remote content from key into wrapped_file.'''
|
||||
position = self.tell()
|
||||
self.seek(0)
|
||||
|
||||
response = requests.get(
|
||||
'{0}/component/get'.format(self._session.server_url),
|
||||
params={
|
||||
'id': self.resource_identifier,
|
||||
'username': self._session.api_user,
|
||||
'apiKey': self._session.api_key
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as error:
|
||||
raise ftrack_api.exception.AccessorOperationFailedError(
|
||||
'Failed to read data: {0}.'.format(error)
|
||||
)
|
||||
|
||||
for block in response.iter_content(ftrack_api.symbol.CHUNK_SIZE):
|
||||
self.wrapped_file.write(block)
|
||||
|
||||
self.flush()
|
||||
self.seek(position)
|
||||
|
||||
def _write(self):
|
||||
'''Write current data to remote key.'''
|
||||
position = self.tell()
|
||||
self.seek(0)
|
||||
|
||||
# Retrieve component from cache to construct a filename.
|
||||
component = self._session.get('FileComponent', self.resource_identifier)
|
||||
if not component:
|
||||
raise ftrack_api.exception.AccessorOperationFailedError(
|
||||
'Unable to retrieve component with id: {0}.'.format(
|
||||
self.resource_identifier
|
||||
)
|
||||
)
|
||||
|
||||
# Construct a name from component name and file_type.
|
||||
name = component['name']
|
||||
if component['file_type']:
|
||||
name = u'{0}.{1}'.format(
|
||||
name,
|
||||
component['file_type'].lstrip('.')
|
||||
)
|
||||
|
||||
try:
|
||||
metadata = self._session.get_upload_metadata(
|
||||
component_id=self.resource_identifier,
|
||||
file_name=name,
|
||||
file_size=self._get_size(),
|
||||
checksum=self._compute_checksum()
|
||||
)
|
||||
except Exception as error:
|
||||
raise ftrack_api.exception.AccessorOperationFailedError(
|
||||
'Failed to get put metadata: {0}.'.format(error)
|
||||
)
|
||||
|
||||
# Ensure at beginning of file before put.
|
||||
self.seek(0)
|
||||
|
||||
# Put the file based on the metadata.
|
||||
response = requests.put(
|
||||
metadata['url'],
|
||||
data=self.wrapped_file,
|
||||
headers=metadata['headers']
|
||||
)
|
||||
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as error:
|
||||
raise ftrack_api.exception.AccessorOperationFailedError(
|
||||
'Failed to put file to server: {0}.'.format(error)
|
||||
)
|
||||
|
||||
self.seek(position)
|
||||
|
||||
def _get_size(self):
|
||||
'''Return size of file in bytes.'''
|
||||
position = self.tell()
|
||||
self.seek(0, os.SEEK_END)
|
||||
length = self.tell()
|
||||
self.seek(position)
|
||||
return length
|
||||
|
||||
def _compute_checksum(self):
|
||||
'''Return checksum for file.'''
|
||||
fp = self.wrapped_file
|
||||
buf_size = ftrack_api.symbol.CHUNK_SIZE
|
||||
hash_obj = hashlib.md5()
|
||||
spos = fp.tell()
|
||||
|
||||
s = fp.read(buf_size)
|
||||
while s:
|
||||
hash_obj.update(s)
|
||||
s = fp.read(buf_size)
|
||||
|
||||
base64_digest = base64.encodebytes(hash_obj.digest()).decode('utf-8')
|
||||
if base64_digest[-1] == '\n':
|
||||
base64_digest = base64_digest[0:-1]
|
||||
|
||||
fp.seek(spos)
|
||||
return base64_digest
|
||||
|
||||
|
||||
class _ServerAccessor(Accessor):
|
||||
'''Provide server location access.'''
|
||||
|
||||
def __init__(self, session, **kw):
|
||||
'''Initialise location accessor.'''
|
||||
super(_ServerAccessor, self).__init__(**kw)
|
||||
|
||||
self._session = session
|
||||
|
||||
def open(self, resource_identifier, mode='rb'):
|
||||
'''Return :py:class:`~ftrack_api.Data` for *resource_identifier*.'''
|
||||
return ServerFile(resource_identifier, session=self._session, mode=mode)
|
||||
|
||||
def remove(self, resourceIdentifier):
|
||||
'''Remove *resourceIdentifier*.'''
|
||||
response = requests.get(
|
||||
'{0}/component/remove'.format(self._session.server_url),
|
||||
params={
|
||||
'id': resourceIdentifier,
|
||||
'username': self._session.api_user,
|
||||
'apiKey': self._session.api_key
|
||||
}
|
||||
)
|
||||
if response.status_code != 200:
|
||||
raise ftrack_api.exception.AccessorOperationFailedError(
|
||||
'Failed to remove file.'
|
||||
)
|
||||
|
||||
def get_container(self, resource_identifier):
|
||||
'''Return resource_identifier of container for *resource_identifier*.'''
|
||||
return None
|
||||
|
||||
def make_container(self, resource_identifier, recursive=True):
|
||||
'''Make a container at *resource_identifier*.'''
|
||||
|
||||
def list(self, resource_identifier):
|
||||
'''Return list of entries in *resource_identifier* container.'''
|
||||
raise NotImplementedError()
|
||||
|
||||
def exists(self, resource_identifier):
|
||||
'''Return if *resource_identifier* is valid and exists in location.'''
|
||||
return False
|
||||
|
||||
def is_file(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a file.'''
|
||||
raise NotImplementedError()
|
||||
|
||||
def is_container(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a container.'''
|
||||
raise NotImplementedError()
|
||||
|
||||
def is_sequence(self, resource_identifier):
|
||||
'''Return whether *resource_identifier* refers to a file sequence.'''
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_url(self, resource_identifier):
|
||||
'''Return url for *resource_identifier*.'''
|
||||
url_string = (
|
||||
u'{url}/component/get?id={id}&username={username}'
|
||||
u'&apiKey={apiKey}'
|
||||
)
|
||||
return url_string.format(
|
||||
url=self._session.server_url,
|
||||
id=resource_identifier,
|
||||
username=self._session.api_user,
|
||||
apiKey=self._session.api_key
|
||||
)
|
||||
|
||||
def get_thumbnail_url(self, resource_identifier, size=None):
|
||||
'''Return thumbnail url for *resource_identifier*.
|
||||
|
||||
Optionally, specify *size* to constrain the downscaled image to size
|
||||
x size pixels.
|
||||
'''
|
||||
url_string = (
|
||||
u'{url}/component/thumbnail?id={id}&username={username}'
|
||||
u'&apiKey={apiKey}'
|
||||
)
|
||||
url = url_string.format(
|
||||
url=self._session.server_url,
|
||||
id=resource_identifier,
|
||||
username=self._session.api_user,
|
||||
apiKey=self._session.api_key
|
||||
)
|
||||
if size:
|
||||
url += u'&size={0}'.format(size)
|
||||
|
||||
return url
|
||||
698 pype/vendor/ftrack_api/attribute.py (vendored, new file)
@@ -0,0 +1,698 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from builtins import object
|
||||
import collections
|
||||
import copy
|
||||
import logging
|
||||
import functools
|
||||
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.exception
|
||||
import ftrack_api.collection
|
||||
import ftrack_api.inspection
|
||||
import ftrack_api.operation
|
||||
|
||||
logger = logging.getLogger(
|
||||
__name__
|
||||
)
|
||||
|
||||
|
||||
def merge_references(function):
|
||||
'''Decorator to handle merging of references / collections.'''
|
||||
|
||||
@functools.wraps(function)
|
||||
def get_value(attribute, entity):
|
||||
'''Merge the attribute with the local cache.'''
|
||||
|
||||
if attribute.name not in entity._inflated:
|
||||
# Only merge on first access to avoid
|
||||
# inflating them multiple times.
|
||||
|
||||
logger.debug(
|
||||
'Merging potential new data into attached '
|
||||
'entity for attribute {0}.'.format(
|
||||
attribute.name
|
||||
)
|
||||
)
|
||||
|
||||
# Local attributes.
|
||||
local_value = attribute.get_local_value(entity)
|
||||
if isinstance(
|
||||
local_value,
|
||||
(
|
||||
ftrack_api.entity.base.Entity,
|
||||
ftrack_api.collection.Collection,
|
||||
ftrack_api.collection.MappedCollectionProxy
|
||||
)
|
||||
):
|
||||
logger.debug(
|
||||
'Merging local value for attribute {0}.'.format(attribute)
|
||||
)
|
||||
|
||||
merged_local_value = entity.session._merge(
|
||||
local_value, merged=dict()
|
||||
)
|
||||
|
||||
if merged_local_value is not local_value:
|
||||
with entity.session.operation_recording(False):
|
||||
attribute.set_local_value(entity, merged_local_value)
|
||||
|
||||
# Remote attributes.
|
||||
remote_value = attribute.get_remote_value(entity)
|
||||
if isinstance(
|
||||
remote_value,
|
||||
(
|
||||
ftrack_api.entity.base.Entity,
|
||||
ftrack_api.collection.Collection,
|
||||
ftrack_api.collection.MappedCollectionProxy
|
||||
)
|
||||
):
|
||||
logger.debug(
|
||||
'Merging remote value for attribute {0}.'.format(attribute)
|
||||
)
|
||||
|
||||
merged_remote_value = entity.session._merge(
|
||||
remote_value, merged=dict()
|
||||
)
|
||||
|
||||
if merged_remote_value is not remote_value:
|
||||
attribute.set_remote_value(entity, merged_remote_value)
|
||||
|
||||
entity._inflated.add(
|
||||
attribute.name
|
||||
)
|
||||
|
||||
return function(
|
||||
attribute, entity
|
||||
)
|
||||
|
||||
return get_value
|
||||
|
||||
|
||||
class Attributes(object):
|
||||
'''Collection of properties accessible by name.'''
|
||||
|
||||
def __init__(self, attributes=None):
|
||||
super(Attributes, self).__init__()
|
||||
self._data = dict()
|
||||
if attributes is not None:
|
||||
for attribute in attributes:
|
||||
self.add(attribute)
|
||||
|
||||
def add(self, attribute):
|
||||
'''Add *attribute*.'''
|
||||
existing = self._data.get(attribute.name, None)
|
||||
if existing:
|
||||
raise ftrack_api.exception.NotUniqueError(
|
||||
'Attribute with name {0} already added as {1}'
|
||||
.format(attribute.name, existing)
|
||||
)
|
||||
|
||||
self._data[attribute.name] = attribute
|
||||
|
||||
def remove(self, attribute):
|
||||
'''Remove attribute.'''
|
||||
self._data.pop(attribute.name)
|
||||
|
||||
def get(self, name):
|
||||
'''Return attribute by *name*.
|
||||
|
||||
If no attribute matches *name* then return None.
|
||||
|
||||
'''
|
||||
return self._data.get(name, None)
|
||||
|
||||
def keys(self):
|
||||
'''Return list of attribute names.'''
|
||||
return list(self._data.keys())
|
||||
|
||||
def __contains__(self, item):
|
||||
'''Return whether *item* present.'''
|
||||
if not isinstance(item, Attribute):
|
||||
return False
|
||||
|
||||
return item.name in self._data
|
||||
|
||||
def __iter__(self):
|
||||
'''Return iterator over attributes.'''
|
||||
return iter(self._data.values())
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of attributes.'''
|
||||
return len(self._data)
|
||||
|
||||
|
||||
class Attribute(object):
|
||||
'''A name and value pair persisted remotely.'''
|
||||
|
||||
def __init__(
|
||||
self, name, default_value=ftrack_api.symbol.NOT_SET, mutable=True
|
||||
):
|
||||
'''Initialise attribute with *name*.
|
||||
|
||||
*default_value* represents the default value for the attribute. It may
|
||||
be a callable. It is not used within the attribute when providing
|
||||
values, but instead exists for other parts of the system to reference.
|
||||
|
||||
If *mutable* is set to False then the local value of the attribute on an
|
||||
entity can only be set when both the existing local and remote values
|
||||
are :attr:`ftrack_api.symbol.NOT_SET`. The exception to this is when the
|
||||
target value is also :attr:`ftrack_api.symbol.NOT_SET`.
|
||||
|
||||
'''
|
||||
super(Attribute, self).__init__()
|
||||
self._name = name
|
||||
self._mutable = mutable
|
||||
self.default_value = default_value
|
||||
|
||||
self._local_key = 'local'
|
||||
self._remote_key = 'remote'
|
||||
|
||||
def __repr__(self):
|
||||
'''Return representation of entity.'''
|
||||
return '<{0}.{1}({2}) object at {3}>'.format(
|
||||
self.__module__,
|
||||
self.__class__.__name__,
|
||||
self.name,
|
||||
id(self)
|
||||
)
|
||||
|
||||
def get_entity_storage(self, entity):
|
||||
'''Return attribute storage on *entity* creating if missing.'''
|
||||
storage_key = '_ftrack_attribute_storage'
|
||||
storage = getattr(entity, storage_key, None)
|
||||
if storage is None:
|
||||
storage = collections.defaultdict(
|
||||
lambda:
|
||||
{
|
||||
self._local_key: ftrack_api.symbol.NOT_SET,
|
||||
self._remote_key: ftrack_api.symbol.NOT_SET
|
||||
}
|
||||
)
|
||||
setattr(entity, storage_key, storage)
|
||||
|
||||
return storage
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
'''Return name.'''
|
||||
return self._name
|
||||
|
||||
@property
|
||||
def mutable(self):
|
||||
'''Return whether attribute is mutable.'''
|
||||
return self._mutable
|
||||
|
||||
def get_value(self, entity):
|
||||
'''Return current value for *entity*.
|
||||
|
||||
If a value was set locally then return it, otherwise return last known
|
||||
remote value. If no remote value yet retrieved, make a request for it
|
||||
via the session and block until available.
|
||||
|
||||
'''
|
||||
value = self.get_local_value(entity)
|
||||
if value is not ftrack_api.symbol.NOT_SET:
|
||||
return value
|
||||
|
||||
value = self.get_remote_value(entity)
|
||||
if value is not ftrack_api.symbol.NOT_SET:
|
||||
return value
|
||||
|
||||
if not entity.session.auto_populate:
|
||||
return value
|
||||
|
||||
self.populate_remote_value(entity)
|
||||
return self.get_remote_value(entity)
|
||||
|
||||
def get_local_value(self, entity):
|
||||
'''Return locally set value for *entity*.'''
|
||||
storage = self.get_entity_storage(entity)
|
||||
return storage[self.name][self._local_key]
|
||||
|
||||
def get_remote_value(self, entity):
|
||||
'''Return remote value for *entity*.
|
||||
|
||||
.. note::
|
||||
|
||||
Only return locally stored remote value, do not fetch from remote.
|
||||
|
||||
'''
|
||||
storage = self.get_entity_storage(entity)
|
||||
return storage[self.name][self._remote_key]
|
||||
|
||||
def set_local_value(self, entity, value):
|
||||
'''Set local *value* for *entity*.'''
|
||||
if (
|
||||
not self.mutable
|
||||
and self.is_set(entity)
|
||||
and value is not ftrack_api.symbol.NOT_SET
|
||||
):
|
||||
raise ftrack_api.exception.ImmutableAttributeError(self)
|
||||
|
||||
old_value = self.get_local_value(entity)
|
||||
|
||||
storage = self.get_entity_storage(entity)
|
||||
storage[self.name][self._local_key] = value
|
||||
|
||||
# Record operation.
|
||||
if entity.session.record_operations:
|
||||
entity.session.recorded_operations.push(
|
||||
ftrack_api.operation.UpdateEntityOperation(
|
||||
entity.entity_type,
|
||||
ftrack_api.inspection.primary_key(entity),
|
||||
self.name,
|
||||
old_value,
|
||||
value
|
||||
)
|
||||
)
|
||||
|
||||
def set_remote_value(self, entity, value):
|
||||
'''Set remote *value*.
|
||||
|
||||
.. note::
|
||||
|
||||
Only set locally stored remote value, do not persist to remote.
|
||||
|
||||
'''
|
||||
storage = self.get_entity_storage(entity)
|
||||
storage[self.name][self._remote_key] = value
|
||||
|
||||
def populate_remote_value(self, entity):
|
||||
'''Populate remote value for *entity*.'''
|
||||
entity.session.populate([entity], self.name)
|
||||
|
||||
def is_modified(self, entity):
|
||||
'''Return whether local value set and differs from remote.
|
||||
|
||||
.. note::
|
||||
|
||||
Will not fetch remote value so may report True even when values
|
||||
are the same on the remote.
|
||||
|
||||
'''
|
||||
local_value = self.get_local_value(entity)
|
||||
remote_value = self.get_remote_value(entity)
|
||||
return (
|
||||
local_value is not ftrack_api.symbol.NOT_SET
|
||||
and local_value != remote_value
|
||||
)
|
||||
|
||||
def is_set(self, entity):
|
||||
'''Return whether a value is set for *entity*.'''
|
||||
return any([
|
||||
self.get_local_value(entity) is not ftrack_api.symbol.NOT_SET,
|
||||
self.get_remote_value(entity) is not ftrack_api.symbol.NOT_SET
|
||||
])
|
||||
|
||||
|
||||
class ScalarAttribute(Attribute):
|
||||
'''Represent a scalar value.'''
|
||||
|
||||
def __init__(self, name, data_type, **kw):
|
||||
'''Initialise property.'''
|
||||
super(ScalarAttribute, self).__init__(name, **kw)
|
||||
self.data_type = data_type
|
||||
|
||||
|
||||
class ReferenceAttribute(Attribute):
|
||||
'''Reference another entity.'''
|
||||
|
||||
def __init__(self, name, entity_type, **kw):
|
||||
'''Initialise property.'''
|
||||
super(ReferenceAttribute, self).__init__(name, **kw)
|
||||
self.entity_type = entity_type
|
||||
|
||||
def populate_remote_value(self, entity):
|
||||
'''Populate remote value for *entity*.
|
||||
|
||||
As attribute references another entity, use that entity's configured
|
||||
default projections to auto populate useful attributes when loading.
|
||||
|
||||
'''
|
||||
reference_entity_type = entity.session.types[self.entity_type]
|
||||
default_projections = reference_entity_type.default_projections
|
||||
|
||||
projections = []
|
||||
if default_projections:
|
||||
for projection in default_projections:
|
||||
projections.append('{0}.{1}'.format(self.name, projection))
|
||||
else:
|
||||
projections.append(self.name)
|
||||
|
||||
entity.session.populate([entity], ', '.join(projections))
|
||||
|
||||
def is_modified(self, entity):
|
||||
'''Return whether a local value has been set and differs from remote.
|
||||
|
||||
.. note::
|
||||
|
||||
Will not fetch remote value so may report True even when values
|
||||
are the same on the remote.
|
||||
|
||||
'''
|
||||
local_value = self.get_local_value(entity)
|
||||
remote_value = self.get_remote_value(entity)
|
||||
|
||||
if local_value is ftrack_api.symbol.NOT_SET:
|
||||
return False
|
||||
|
||||
if remote_value is ftrack_api.symbol.NOT_SET:
|
||||
return True
|
||||
|
||||
if (
|
||||
ftrack_api.inspection.identity(local_value)
|
||||
!= ftrack_api.inspection.identity(remote_value)
|
||||
):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
@merge_references
|
||||
def get_value(self, entity):
|
||||
return super(ReferenceAttribute, self).get_value(
|
||||
entity
|
||||
)
|
||||
|
||||
class AbstractCollectionAttribute(Attribute):
|
||||
'''Base class for collection attributes.'''
|
||||
|
||||
#: Collection class used by attribute.
|
||||
collection_class = None
|
||||
|
||||
@merge_references
|
||||
def get_value(self, entity):
|
||||
'''Return current value for *entity*.
|
||||
|
||||
If a value was set locally then return it, otherwise return last known
|
||||
remote value. If no remote value yet retrieved, make a request for it
|
||||
via the session and block until available.
|
||||
|
||||
.. note::
|
||||
|
||||
As value is a collection that is mutable, will transfer a remote
|
||||
value into the local value on access if no local value currently
|
||||
set.
|
||||
|
||||
'''
|
||||
super(AbstractCollectionAttribute, self).get_value(entity)
|
||||
|
||||
# Conditionally, copy remote value into local value so that it can be
|
||||
# mutated without side effects.
|
||||
local_value = self.get_local_value(entity)
|
||||
remote_value = self.get_remote_value(entity)
|
||||
if (
|
||||
local_value is ftrack_api.symbol.NOT_SET
|
||||
and isinstance(remote_value, self.collection_class)
|
||||
):
|
||||
try:
|
||||
with entity.session.operation_recording(False):
|
||||
self.set_local_value(entity, copy.copy(remote_value))
|
||||
except ftrack_api.exception.ImmutableAttributeError:
|
||||
pass
|
||||
|
||||
value = self.get_local_value(entity)
|
||||
|
||||
# If the local value is still not set then attempt to set it with a
|
||||
# suitable placeholder collection so that the caller can interact with
|
||||
# the collection using its normal interface. This is required for a
|
||||
# newly created entity for example. It *could* be done as a simple
|
||||
# default value, but that would incur cost for every collection even
|
||||
# when they are not modified before commit.
|
||||
if value is ftrack_api.symbol.NOT_SET:
|
||||
try:
|
||||
with entity.session.operation_recording(False):
|
||||
self.set_local_value(
|
||||
entity,
|
||||
# None should be treated as empty collection.
|
||||
None
|
||||
)
|
||||
except ftrack_api.exception.ImmutableAttributeError:
|
||||
pass
|
||||
|
||||
return self.get_local_value(entity)
|
||||
|
||||
def set_local_value(self, entity, value):
|
||||
'''Set local *value* for *entity*.'''
|
||||
if value is not ftrack_api.symbol.NOT_SET:
|
||||
value = self._adapt_to_collection(entity, value)
|
||||
value.mutable = self.mutable
|
||||
|
||||
super(AbstractCollectionAttribute, self).set_local_value(entity, value)
|
||||
|
||||
def set_remote_value(self, entity, value):
|
||||
'''Set remote *value*.
|
||||
|
||||
.. note::
|
||||
|
||||
Only set locally stored remote value, do not persist to remote.
|
||||
|
||||
'''
|
||||
if value is not ftrack_api.symbol.NOT_SET:
|
||||
value = self._adapt_to_collection(entity, value)
|
||||
value.mutable = False
|
||||
|
||||
super(AbstractCollectionAttribute, self).set_remote_value(entity, value)
|
||||
|
||||
def _adapt_to_collection(self, entity, value):
|
||||
'''Adapt *value* to appropriate collection instance for *entity*.
|
||||
|
||||
.. note::
|
||||
|
||||
If *value* is None then return a suitable empty collection.
|
||||
|
||||
'''
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class CollectionAttribute(AbstractCollectionAttribute):
|
||||
'''Represent a collection of other entities.'''
|
||||
|
||||
#: Collection class used by attribute.
|
||||
collection_class = ftrack_api.collection.Collection
|
||||
|
||||
def _adapt_to_collection(self, entity, value):
|
||||
'''Adapt *value* to a Collection instance on *entity*.'''
|
||||
|
||||
if not isinstance(value, ftrack_api.collection.Collection):
|
||||
|
||||
if value is None:
|
||||
value = ftrack_api.collection.Collection(entity, self)
|
||||
|
||||
elif isinstance(value, list):
|
||||
value = ftrack_api.collection.Collection(
|
||||
entity, self, data=value
|
||||
)
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'Cannot convert {0!r} to collection.'.format(value)
|
||||
)
|
||||
|
||||
else:
|
||||
if value.attribute is not self:
|
||||
raise ftrack_api.exception.AttributeError(
|
||||
'Collection already bound to a different attribute'
|
||||
)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
class KeyValueMappedCollectionAttribute(AbstractCollectionAttribute):
|
||||
'''Represent a mapped key, value collection of entities.'''
|
||||
|
||||
#: Collection class used by attribute.
|
||||
collection_class = ftrack_api.collection.KeyValueMappedCollectionProxy
|
||||
|
||||
def __init__(
|
||||
self, name, creator, key_attribute, value_attribute, **kw
|
||||
):
|
||||
'''Initialise attribute with *name*.
|
||||
|
||||
*creator* should be a function that accepts a dictionary of data and
|
||||
is used by the referenced collection to create new entities in the
|
||||
collection.
|
||||
|
||||
*key_attribute* should be the name of the attribute on an entity in
|
||||
the collection that represents the value for 'key' of the dictionary.
|
||||
|
||||
*value_attribute* should be the name of the attribute on an entity in
|
||||
the collection that represents the value for 'value' of the dictionary.
|
||||
|
||||
'''
|
||||
self.creator = creator
|
||||
self.key_attribute = key_attribute
|
||||
self.value_attribute = value_attribute
|
||||
|
||||
super(KeyValueMappedCollectionAttribute, self).__init__(name, **kw)
|
||||
|
||||
def _adapt_to_collection(self, entity, value):
|
||||
'''Adapt *value* to an *entity*.'''
|
||||
if not isinstance(
|
||||
value, ftrack_api.collection.KeyValueMappedCollectionProxy
|
||||
):
|
||||
|
||||
if value is None:
|
||||
value = ftrack_api.collection.KeyValueMappedCollectionProxy(
|
||||
ftrack_api.collection.Collection(entity, self),
|
||||
self.creator, self.key_attribute,
|
||||
self.value_attribute
|
||||
)
|
||||
|
||||
elif isinstance(value, (list, ftrack_api.collection.Collection)):
|
||||
|
||||
if isinstance(value, list):
|
||||
value = ftrack_api.collection.Collection(
|
||||
entity, self, data=value
|
||||
)
|
||||
|
||||
value = ftrack_api.collection.KeyValueMappedCollectionProxy(
|
||||
value, self.creator, self.key_attribute,
|
||||
self.value_attribute
|
||||
)
|
||||
|
||||
elif isinstance(value, collections.Mapping):
|
||||
# Convert mapping.
|
||||
# TODO: When backend model improves, revisit this logic.
|
||||
# First get existing value and delete all references. This is
|
||||
# needed because otherwise they will not be automatically
|
||||
# removed server side.
|
||||
# The following should not cause recursion as the internal
|
||||
# values should be mapped collections already.
|
||||
current_value = self.get_value(entity)
|
||||
if not isinstance(
|
||||
current_value,
|
||||
ftrack_api.collection.KeyValueMappedCollectionProxy
|
||||
):
|
||||
raise NotImplementedError(
|
||||
'Cannot adapt mapping to collection as current value '
|
||||
'type is not a KeyValueMappedCollectionProxy.'
|
||||
)
|
||||
|
||||
# Create the new collection using the existing collection as
|
||||
# basis. Then update through proxy interface to ensure all
|
||||
# internal operations called consistently (such as entity
|
||||
# deletion for key removal).
|
||||
collection = ftrack_api.collection.Collection(
|
||||
entity, self, data=current_value.collection[:]
|
||||
)
|
||||
collection_proxy = (
|
||||
ftrack_api.collection.KeyValueMappedCollectionProxy(
|
||||
collection, self.creator,
|
||||
self.key_attribute, self.value_attribute
|
||||
)
|
||||
)
|
||||
|
||||
# Remove expired keys from collection.
|
||||
expired_keys = set(current_value.keys()) - set(value.keys())
|
||||
for key in expired_keys:
|
||||
del collection_proxy[key]
|
||||
|
||||
# Set new values for existing keys / add new keys.
|
||||
for key, value in list(value.items()):
|
||||
collection_proxy[key] = value
|
||||
|
||||
value = collection_proxy
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'Cannot convert {0!r} to collection.'.format(value)
|
||||
)
|
||||
else:
|
||||
if value.attribute is not self:
|
||||
raise ftrack_api.exception.AttributeError(
|
||||
'Collection already bound to a different attribute.'
|
||||
)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
class CustomAttributeCollectionAttribute(AbstractCollectionAttribute):
|
||||
'''Represent a mapped custom attribute collection of entities.'''
|
||||
|
||||
#: Collection class used by attribute.
|
||||
collection_class = (
|
||||
ftrack_api.collection.CustomAttributeCollectionProxy
|
||||
)
|
||||
|
||||
def _adapt_to_collection(self, entity, value):
|
||||
'''Adapt *value* to an *entity*.'''
|
||||
if not isinstance(
|
||||
value, ftrack_api.collection.CustomAttributeCollectionProxy
|
||||
):
|
||||
|
||||
if value is None:
|
||||
value = ftrack_api.collection.CustomAttributeCollectionProxy(
|
||||
ftrack_api.collection.Collection(entity, self)
|
||||
)
|
||||
|
||||
elif isinstance(value, (list, ftrack_api.collection.Collection)):
|
||||
|
||||
# Why are we creating a new collection if it is a list? This will cause
|
||||
# any merge to create a new proxy and collection.
|
||||
if isinstance(value, list):
|
||||
value = ftrack_api.collection.Collection(
|
||||
entity, self, data=value
|
||||
)
|
||||
|
||||
value = ftrack_api.collection.CustomAttributeCollectionProxy(
|
||||
value
|
||||
)
|
||||
|
||||
elif isinstance(value, collections.Mapping):
|
||||
# Convert mapping.
|
||||
# TODO: When backend model improves, revisit this logic.
|
||||
# First get existing value and delete all references. This is
|
||||
# needed because otherwise they will not be automatically
|
||||
# removed server side.
|
||||
# The following should not cause recursion as the internal
|
||||
# values should be mapped collections already.
|
||||
current_value = self.get_value(entity)
|
||||
if not isinstance(
|
||||
current_value,
|
||||
ftrack_api.collection.CustomAttributeCollectionProxy
|
||||
):
|
||||
raise NotImplementedError(
|
||||
'Cannot adapt mapping to collection as current value '
|
||||
'type is not a CustomAttributeCollectionProxy.'
|
||||
)
|
||||
|
||||
# Create the new collection using the existing collection as
|
||||
# basis. Then update through proxy interface to ensure all
|
||||
# internal operations called consistently (such as entity
|
||||
# deletion for key removal).
|
||||
collection = ftrack_api.collection.Collection(
|
||||
entity, self, data=current_value.collection[:]
|
||||
)
|
||||
collection_proxy = (
|
||||
ftrack_api.collection.CustomAttributeCollectionProxy(
|
||||
collection
|
||||
)
|
||||
)
|
||||
|
||||
# Remove expired keys from collection.
|
||||
expired_keys = set(current_value.keys()) - set(value.keys())
|
||||
for key in expired_keys:
|
||||
del collection_proxy[key]
|
||||
|
||||
# Set new values for existing keys / add new keys.
|
||||
for key, value in list(value.items()):
|
||||
collection_proxy[key] = value
|
||||
|
||||
value = collection_proxy
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'Cannot convert {0!r} to collection.'.format(value)
|
||||
)
|
||||
else:
|
||||
if value.attribute is not self:
|
||||
raise ftrack_api.exception.AttributeError(
|
||||
'Collection already bound to a different attribute.'
|
||||
)
|
||||
|
||||
return value
|
||||
pype/vendor/ftrack_api/cache.py (vendored, new file, 597 lines)
@@ -0,0 +1,597 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
'''Caching framework.
|
||||
|
||||
Defines a standardised :class:`Cache` interface for storing data against
|
||||
specific keys. Key generation is also standardised using a :class:`KeyMaker`
|
||||
interface.
|
||||
|
||||
Combining a Cache and KeyMaker allows for memoisation of function calls with
|
||||
respect to the arguments used by using a :class:`Memoiser`.
|
||||
|
||||
As a convenience a simple :func:`memoise` decorator is included for quick
|
||||
memoisation of function using a global cache and standard key maker.
|
||||
|
||||
'''
|
||||
|
||||
from future import standard_library
|
||||
standard_library.install_aliases()
|
||||
from builtins import str
|
||||
from past.builtins import basestring
|
||||
from builtins import object
|
||||
import collections
|
||||
import functools
|
||||
import abc
|
||||
import copy
|
||||
import inspect
|
||||
import re
|
||||
try:
|
||||
# Python 2.x
|
||||
import anydbm
|
||||
except ImportError:
|
||||
import dbm as anydbm
|
||||
|
||||
|
||||
import contextlib
|
||||
from future.utils import with_metaclass
|
||||
try:
|
||||
import cPickle as pickle
|
||||
|
||||
except:
|
||||
import pickle
|
||||
|
||||
import ftrack_api.inspection
|
||||
import ftrack_api.symbol
|
||||
|
||||
|
||||
class Cache(with_metaclass(abc.ABCMeta, object)):
|
||||
'''Cache interface.
|
||||
|
||||
Derive from this to define concrete cache implementations. A cache is
|
||||
centered around the concept of key:value pairings where the key is unique
|
||||
across the cache.
|
||||
|
||||
'''
|
||||
|
||||
@abc.abstractmethod
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
|
||||
@abc.abstractmethod
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
|
||||
@abc.abstractmethod
|
||||
def remove(self, key):
|
||||
'''Remove *key* and return stored value.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
|
||||
def keys(self):
|
||||
'''Return list of keys at this current time.
|
||||
|
||||
.. warning::
|
||||
|
||||
Actual keys may differ from those returned due to timing of access.
|
||||
|
||||
'''
|
||||
raise NotImplementedError() # pragma: no cover
|
||||
|
||||
def values(self):
|
||||
'''Return values for current keys.'''
|
||||
values = []
|
||||
for key in list(self.keys()):
|
||||
try:
|
||||
value = self.get(key)
|
||||
except KeyError:
|
||||
continue
|
||||
else:
|
||||
values.append(value)
|
||||
|
||||
return values
|
||||
|
||||
def clear(self, pattern=None):
|
||||
'''Remove all keys matching *pattern*.
|
||||
|
||||
*pattern* should be a regular expression string.
|
||||
|
||||
If *pattern* is None then all keys will be removed.
|
||||
|
||||
'''
|
||||
|
||||
if pattern is not None:
|
||||
pattern = re.compile(pattern)
|
||||
|
||||
for key in list(self.keys()):
|
||||
if pattern is not None:
|
||||
if not pattern.search(key):
|
||||
continue
|
||||
|
||||
try:
|
||||
self.remove(key)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
class ProxyCache(Cache):
|
||||
'''Proxy another cache.'''
|
||||
|
||||
def __init__(self, proxied):
|
||||
'''Initialise cache with *proxied* cache instance.'''
|
||||
self.proxied = proxied
|
||||
super(ProxyCache, self).__init__()
|
||||
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
return self.proxied.get(key)
|
||||
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
return self.proxied.set(key, value)
|
||||
|
||||
def remove(self, key):
|
||||
'''Remove *key* and return stored value.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
return self.proxied.remove(key)
|
||||
|
||||
def keys(self):
|
||||
'''Return list of keys at this current time.
|
||||
|
||||
.. warning::
|
||||
|
||||
Actual keys may differ from those returned due to timing of access.
|
||||
|
||||
'''
|
||||
return list(self.proxied.keys())
|
||||
|
||||
|
||||
class LayeredCache(Cache):
|
||||
'''Layered cache.'''
|
||||
|
||||
def __init__(self, caches):
|
||||
'''Initialise cache with *caches*.'''
|
||||
super(LayeredCache, self).__init__()
|
||||
self.caches = caches
|
||||
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
Attempt to retrieve from cache layers in turn, starting with shallowest.
|
||||
If value retrieved, then also set the value in each higher level cache
|
||||
up from where retrieved.
|
||||
|
||||
'''
|
||||
target_caches = []
|
||||
value = ftrack_api.symbol.NOT_SET
|
||||
|
||||
for cache in self.caches:
|
||||
try:
|
||||
value = cache.get(key)
|
||||
except KeyError:
|
||||
target_caches.append(cache)
|
||||
continue
|
||||
else:
|
||||
break
|
||||
|
||||
if value is ftrack_api.symbol.NOT_SET:
|
||||
raise KeyError(key)
|
||||
|
||||
# Set value on all higher level caches.
|
||||
for cache in target_caches:
|
||||
cache.set(key, value)
|
||||
|
||||
return value
|
||||
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
for cache in self.caches:
|
||||
cache.set(key, value)
|
||||
|
||||
def remove(self, key):
|
||||
'''Remove *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found in any layer.
|
||||
|
||||
'''
|
||||
removed = False
|
||||
for cache in self.caches:
|
||||
try:
|
||||
cache.remove(key)
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
removed = True
|
||||
|
||||
if not removed:
|
||||
raise KeyError(key)
|
||||
|
||||
def keys(self):
|
||||
'''Return list of keys at this current time.
|
||||
|
||||
.. warning::
|
||||
|
||||
Actual keys may differ from those returned due to timing of access.
|
||||
|
||||
'''
|
||||
keys = []
|
||||
for cache in self.caches:
|
||||
keys.extend(list(cache.keys()))
|
||||
|
||||
return list(set(keys))
|
||||
|
||||
|
||||
class MemoryCache(Cache):
|
||||
'''Memory based cache.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise cache.'''
|
||||
self._cache = {}
|
||||
super(MemoryCache, self).__init__()
|
||||
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
return self._cache[key]
|
||||
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
self._cache[key] = value
|
||||
|
||||
def remove(self, key):
|
||||
'''Remove *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
del self._cache[key]
|
||||
|
||||
def keys(self):
|
||||
'''Return list of keys at this current time.
|
||||
|
||||
.. warning::
|
||||
|
||||
Actual keys may differ from those returned due to timing of access.
|
||||
|
||||
'''
|
||||
return list(self._cache.keys())
|
||||
|
||||
|
||||
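A minimal sketch of how the cache classes above compose, assuming the vendored package is importable as ftrack_api.cache; the key names and values are illustrative only.

# Sketch only: compose two MemoryCache instances into a LayeredCache.
from ftrack_api.cache import LayeredCache, MemoryCache

fast = MemoryCache()
slow = MemoryCache()
slow.set('ftrack:user:jane', {'id': 42})

layered = LayeredCache([fast, slow])

# A hit in the deeper cache is copied back into the shallower one.
value = layered.get('ftrack:user:jane')
assert fast.get('ftrack:user:jane') == value

# Remove every key matching a regular expression from the shallow layer.
fast.clear(pattern='^ftrack:user:')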
class FileCache(Cache):
|
||||
'''File based cache that uses :mod:`anydbm` module.
|
||||
|
||||
.. note::
|
||||
|
||||
No locking of the underlying file is performed.
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, path):
|
||||
'''Initialise cache at *path*.'''
|
||||
self.path = path
|
||||
|
||||
# Initialise cache.
|
||||
cache = anydbm.open(self.path, 'c')
|
||||
cache.close()
|
||||
|
||||
super(FileCache, self).__init__()
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _database(self):
|
||||
'''Yield opened database file.'''
|
||||
cache = anydbm.open(self.path, 'w')
|
||||
try:
|
||||
yield cache
|
||||
finally:
|
||||
cache.close()
|
||||
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
with self._database() as cache:
|
||||
return cache[key.encode('ascii')].decode('utf-8')
|
||||
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
with self._database() as cache:
|
||||
cache[key.encode('ascii')] = value
|
||||
|
||||
def remove(self, key):
|
||||
'''Remove *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
with self._database() as cache:
|
||||
del cache[key.encode('ascii')]
|
||||
|
||||
def keys(self):
|
||||
'''Return list of keys at this current time.
|
||||
|
||||
.. warning::
|
||||
|
||||
Actual keys may differ from those returned due to timing of access.
|
||||
|
||||
'''
|
||||
with self._database() as cache:
|
||||
return [s.decode('utf-8') for s in cache.keys()]
|
||||
#return list(map(str, cache.keys()))
|
||||
|
||||
|
||||
class SerialisedCache(ProxyCache):
|
||||
'''Proxied cache that stores values as serialised data.'''
|
||||
|
||||
def __init__(self, proxied, encode=None, decode=None):
|
||||
'''Initialise cache with *encode* and *decode* callables.
|
||||
|
||||
*proxied* is the underlying cache to use for storage.
|
||||
|
||||
'''
|
||||
self.encode = encode
|
||||
self.decode = decode
|
||||
super(SerialisedCache, self).__init__(proxied)
|
||||
|
||||
def get(self, key):
|
||||
'''Return value for *key*.
|
||||
|
||||
Raise :exc:`KeyError` if *key* not found.
|
||||
|
||||
'''
|
||||
value = super(SerialisedCache, self).get(key)
|
||||
if self.decode:
|
||||
value = self.decode(value)
|
||||
|
||||
return value
|
||||
|
||||
def set(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
if self.encode:
|
||||
value = self.encode(value)
|
||||
|
||||
super(SerialisedCache, self).set(key, value)
|
||||
|
||||
|
||||
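A hedged sketch of pairing SerialisedCache with the dbm-backed FileCache so values survive on disk as JSON; the path and key are illustrative, and it assumes the vendored module is importable.

# Sketch only: JSON-serialised values persisted through a FileCache.
import json
import os
import tempfile

from ftrack_api.cache import FileCache, SerialisedCache

path = os.path.join(tempfile.mkdtemp(), 'ftrack_cache')  # Illustrative path.
cache = SerialisedCache(
    FileCache(path), encode=json.dumps, decode=json.loads
)

cache.set('settings', {'resolution': [1920, 1080]})
assert cache.get('settings') == {'resolution': [1920, 1080]}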
class KeyMaker(with_metaclass(abc.ABCMeta, object)):
|
||||
'''Generate unique keys.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise key maker.'''
|
||||
super(KeyMaker, self).__init__()
|
||||
self.item_separator = ''
|
||||
|
||||
def key(self, *items):
|
||||
'''Return key for *items*.'''
|
||||
keys = []
|
||||
for item in items:
|
||||
keys.append(self._key(item))
|
||||
|
||||
return self.item_separator.join(keys)
|
||||
|
||||
@abc.abstractmethod
|
||||
def _key(self, obj):
|
||||
'''Return key for *obj*.'''
|
||||
|
||||
|
||||
class StringKeyMaker(KeyMaker):
|
||||
'''Generate string key.'''
|
||||
|
||||
def _key(self, obj):
|
||||
'''Return key for *obj*.'''
|
||||
return str(obj)
|
||||
|
||||
|
||||
class ObjectKeyMaker(KeyMaker):
|
||||
'''Generate unique keys for objects.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise key maker.'''
|
||||
super(ObjectKeyMaker, self).__init__()
|
||||
self.item_separator = b'\0'
|
||||
self.mapping_identifier = b'\1'
|
||||
self.mapping_pair_separator = b'\2'
|
||||
self.iterable_identifier = b'\3'
|
||||
self.name_identifier = b'\4'
|
||||
|
||||
def _key(self, item):
|
||||
return self.__key(item)
|
||||
|
||||
def __key(self, item):
|
||||
'''Return key for *item*.
|
||||
|
||||
Returned key will be a pickle like string representing the *item*. This
|
||||
allows for typically non-hashable objects to be used in key generation
|
||||
(such as dictionaries).
|
||||
|
||||
If *item* is iterable then each item in it shall also be passed to this
|
||||
method to ensure correct key generation.
|
||||
|
||||
Special markers are used to distinguish handling of specific cases in
|
||||
order to ensure uniqueness of key corresponds directly to *item*.
|
||||
|
||||
Example::
|
||||
|
||||
>>> key_maker = ObjectKeyMaker()
|
||||
>>> def add(x, y):
|
||||
... "Return sum of *x* and *y*."
|
||||
... return x + y
|
||||
...
|
||||
>>> key_maker.key(add, (1, 2))
|
||||
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03'
|
||||
>>> key_maker.key(add, (1, 3))
|
||||
'\x04add\x00__main__\x00\x03\x80\x02K\x01.\x00\x80\x02K\x03.\x03'
|
||||
|
||||
'''
|
||||
|
||||
|
||||
# TODO: Consider using a more robust and comprehensive solution such as
|
||||
# dill (https://github.com/uqfoundation/dill).
|
||||
if isinstance(item, collections.Iterable):
|
||||
|
||||
if isinstance(item, basestring):
|
||||
return pickle.dumps(item, pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
if isinstance(item, collections.Mapping):
|
||||
contents = self.item_separator.join([
|
||||
(
|
||||
self._key(key) +
|
||||
self.mapping_pair_separator +
|
||||
self._key(value)
|
||||
)
|
||||
for key, value in sorted(item.items())
|
||||
])
|
||||
|
||||
return (
|
||||
self.mapping_identifier +
|
||||
contents +
|
||||
self.mapping_identifier
|
||||
)
|
||||
else:
|
||||
contents = self.item_separator.join([
|
||||
self._key(item) for item in item
|
||||
])
|
||||
return (
|
||||
self.iterable_identifier +
|
||||
contents +
|
||||
self.iterable_identifier
|
||||
)
|
||||
|
||||
elif inspect.ismethod(item):
|
||||
|
||||
return b''.join((
|
||||
self.name_identifier,
|
||||
item.__name__.encode(),
|
||||
self.item_separator,
|
||||
item.__self__.__class__.__name__.encode(),
|
||||
self.item_separator,
|
||||
item.__module__.encode()
|
||||
))
|
||||
|
||||
elif inspect.isfunction(item) or inspect.isclass(item):
|
||||
return b''.join((
|
||||
self.name_identifier,
|
||||
item.__name__.encode(),
|
||||
self.item_separator,
|
||||
item.__module__.encode()
|
||||
))
|
||||
|
||||
elif inspect.isbuiltin(item):
|
||||
return self.name_identifier + item.__name__.encode()
|
||||
|
||||
else:
|
||||
return pickle.dumps(item, pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
|
||||
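A small sketch of why ObjectKeyMaker is useful for memoisation: equal mappings yield equal keys even though dicts are unhashable. It assumes the class is importable from the vendored ftrack_api.cache module.

# Sketch only: equal mappings produce equal keys.
from ftrack_api.cache import ObjectKeyMaker

key_maker = ObjectKeyMaker()
first = key_maker.key({'b': 2, 'a': 1})
second = key_maker.key({'a': 1, 'b': 2})
assert first == second  # Items are sorted before serialisation.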
class Memoiser(object):
|
||||
'''Memoise function calls using a :class:`KeyMaker` and :class:`Cache`.
|
||||
|
||||
Example::
|
||||
|
||||
>>> memoiser = Memoiser(MemoryCache(), ObjectKeyMaker())
|
||||
>>> def add(x, y):
|
||||
... "Return sum of *x* and *y*."
|
||||
... print 'Called'
|
||||
... return x + y
|
||||
...
|
||||
>>> memoiser.call(add, (1, 2), {})
|
||||
Called
|
||||
>>> memoiser.call(add, (1, 2), {})
|
||||
>>> memoiser.call(add, (1, 3), {})
|
||||
Called
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, cache=None, key_maker=None, return_copies=True):
|
||||
'''Initialise with *cache* and *key_maker* to use.
|
||||
|
||||
If *cache* is not specified a default :class:`MemoryCache` will be
|
||||
used. Similarly, if *key_maker* is not specified a default
|
||||
:class:`ObjectKeyMaker` will be used.
|
||||
|
||||
If *return_copies* is True then all results returned from the cache will
|
||||
be deep copies to avoid indirect mutation of cached values.
|
||||
|
||||
'''
|
||||
self.cache = cache
|
||||
if self.cache is None:
|
||||
self.cache = MemoryCache()
|
||||
|
||||
self.key_maker = key_maker
|
||||
if self.key_maker is None:
|
||||
self.key_maker = ObjectKeyMaker()
|
||||
|
||||
self.return_copies = return_copies
|
||||
super(Memoiser, self).__init__()
|
||||
|
||||
def call(self, function, args=None, kw=None):
|
||||
'''Call *function* with *args* and *kw* and return result.
|
||||
|
||||
If *function* was previously called with exactly the same arguments
|
||||
then return cached result if available.
|
||||
|
||||
Store result for call in cache.
|
||||
|
||||
'''
|
||||
if args is None:
|
||||
args = ()
|
||||
|
||||
if kw is None:
|
||||
kw = {}
|
||||
|
||||
# Support arguments being passed as positionals or keywords.
|
||||
arguments = inspect.getcallargs(function, *args, **kw)
|
||||
|
||||
key = self.key_maker.key(function, arguments)
|
||||
try:
|
||||
value = self.cache.get(key)
|
||||
|
||||
except KeyError:
|
||||
value = function(*args, **kw)
|
||||
self.cache.set(key, value)
|
||||
|
||||
# If requested, deep copy value to return in order to avoid cached value
|
||||
# being inadvertently altered by the caller.
|
||||
if self.return_copies:
|
||||
value = copy.deepcopy(value)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def memoise_decorator(memoiser):
|
||||
'''Decorator to memoise function calls using *memoiser*.'''
|
||||
def outer(function):
|
||||
|
||||
@functools.wraps(function)
|
||||
def inner(*args, **kw):
|
||||
return memoiser.call(function, args, kw)
|
||||
|
||||
return inner
|
||||
|
||||
return outer
|
||||
|
||||
|
||||
#: Default memoiser.
|
||||
memoiser = Memoiser()
|
||||
|
||||
#: Default memoise decorator using standard cache and key maker.
|
||||
memoise = memoise_decorator(memoiser)
|
||||
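A hedged usage sketch of the module-level memoise decorator defined above; the function and its arguments are made up for illustration, and the import assumes the vendored package is on the path.

# Sketch only: memoise caches results per unique argument set.
from ftrack_api.cache import memoise

@memoise
def expensive_lookup(project_name, limit=10):
    '''Pretend to run a slow query (illustrative function).'''
    print('querying', project_name)
    return [project_name] * limit

expensive_lookup('alpha')      # Runs and prints, result is cached.
expensive_lookup('alpha')      # Served from the cache, nothing printed.
expensive_lookup('alpha', 20)  # Different arguments, runs again.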
pype/vendor/ftrack_api/collection.py (vendored, new file, 514 lines)
@@ -0,0 +1,514 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from builtins import str
|
||||
import logging
|
||||
|
||||
import collections
|
||||
import copy
|
||||
|
||||
import ftrack_api.exception
|
||||
import ftrack_api.inspection
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.operation
|
||||
import ftrack_api.cache
|
||||
from ftrack_api.logging import LazyLogMessage as L
|
||||
|
||||
|
||||
class Collection(collections.MutableSequence):
|
||||
'''A collection of entities.'''
|
||||
|
||||
def __init__(self, entity, attribute, mutable=True, data=None):
|
||||
'''Initialise collection.'''
|
||||
self.entity = entity
|
||||
self.attribute = attribute
|
||||
self._data = []
|
||||
self._identities = set()
|
||||
|
||||
# Set initial dataset.
|
||||
# Note: For initialisation, immutability is deferred till after initial
|
||||
# population as otherwise there would be no public way to initialise an
|
||||
# immutable collection. The reason self._data is not just set directly
|
||||
# is to ensure other logic can be applied without special handling.
|
||||
self.mutable = True
|
||||
try:
|
||||
if data is None:
|
||||
data = []
|
||||
|
||||
with self.entity.session.operation_recording(False):
|
||||
self.extend(data)
|
||||
finally:
|
||||
self.mutable = mutable
|
||||
|
||||
def _identity_key(self, entity):
|
||||
'''Return identity key for *entity*.'''
|
||||
return str(ftrack_api.inspection.identity(entity))
|
||||
|
||||
def __copy__(self):
|
||||
'''Return shallow copy.
|
||||
|
||||
.. note::
|
||||
|
||||
To maintain expectations on usage, the shallow copy will include a
|
||||
shallow copy of the underlying data store.
|
||||
|
||||
'''
|
||||
cls = self.__class__
|
||||
copied_instance = cls.__new__(cls)
|
||||
copied_instance.__dict__.update(self.__dict__)
|
||||
copied_instance._data = copy.copy(self._data)
|
||||
copied_instance._identities = copy.copy(self._identities)
|
||||
|
||||
return copied_instance
|
||||
|
||||
def _notify(self, old_value):
|
||||
'''Notify about modification.'''
|
||||
# Record operation.
|
||||
if self.entity.session.record_operations:
|
||||
self.entity.session.recorded_operations.push(
|
||||
ftrack_api.operation.UpdateEntityOperation(
|
||||
self.entity.entity_type,
|
||||
ftrack_api.inspection.primary_key(self.entity),
|
||||
self.attribute.name,
|
||||
old_value,
|
||||
self
|
||||
)
|
||||
)
|
||||
|
||||
def insert(self, index, item):
|
||||
'''Insert *item* at *index*.'''
|
||||
if not self.mutable:
|
||||
raise ftrack_api.exception.ImmutableCollectionError(self)
|
||||
|
||||
if item in self:
|
||||
raise ftrack_api.exception.DuplicateItemInCollectionError(
|
||||
item, self
|
||||
)
|
||||
|
||||
old_value = copy.copy(self)
|
||||
self._data.insert(index, item)
|
||||
self._identities.add(self._identity_key(item))
|
||||
self._notify(old_value)
|
||||
|
||||
def __contains__(self, value):
|
||||
'''Return whether *value* present in collection.'''
|
||||
return self._identity_key(value) in self._identities
|
||||
|
||||
def __getitem__(self, index):
|
||||
'''Return item at *index*.'''
|
||||
return self._data[index]
|
||||
|
||||
def __setitem__(self, index, item):
|
||||
'''Set *item* against *index*.'''
|
||||
if not self.mutable:
|
||||
raise ftrack_api.exception.ImmutableCollectionError(self)
|
||||
|
||||
try:
|
||||
existing_index = self.index(item)
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
if index != existing_index:
|
||||
raise ftrack_api.exception.DuplicateItemInCollectionError(
|
||||
item, self
|
||||
)
|
||||
|
||||
old_value = copy.copy(self)
|
||||
try:
|
||||
existing_item = self._data[index]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
self._identities.remove(self._identity_key(existing_item))
|
||||
|
||||
self._data[index] = item
|
||||
self._identities.add(self._identity_key(item))
|
||||
self._notify(old_value)
|
||||
|
||||
def __delitem__(self, index):
|
||||
'''Remove item at *index*.'''
|
||||
if not self.mutable:
|
||||
raise ftrack_api.exception.ImmutableCollectionError(self)
|
||||
|
||||
old_value = copy.copy(self)
|
||||
item = self._data[index]
|
||||
del self._data[index]
|
||||
self._identities.remove(self._identity_key(item))
|
||||
self._notify(old_value)
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of items.'''
|
||||
return len(self._data)
|
||||
|
||||
def __eq__(self, other):
|
||||
'''Return whether this collection is equal to *other*.'''
|
||||
if not isinstance(other, Collection):
|
||||
return False
|
||||
|
||||
return sorted(self._identities) == sorted(other._identities)
|
||||
|
||||
def __ne__(self, other):
|
||||
'''Return whether this collection is not equal to *other*.'''
|
||||
return not self == other
|
||||
|
||||
|
||||
class MappedCollectionProxy(collections.MutableMapping):
|
||||
'''Common base class for mapped collection of entities.'''
|
||||
|
||||
def __init__(self, collection):
|
||||
'''Initialise proxy for *collection*.'''
|
||||
self.logger = logging.getLogger(
|
||||
__name__ + '.' + self.__class__.__name__
|
||||
)
|
||||
self.collection = collection
|
||||
super(MappedCollectionProxy, self).__init__()
|
||||
|
||||
def __copy__(self):
|
||||
'''Return shallow copy.
|
||||
|
||||
.. note::
|
||||
|
||||
To maintain expectations on usage, the shallow copy will include a
|
||||
shallow copy of the underlying collection.
|
||||
|
||||
'''
|
||||
cls = self.__class__
|
||||
copied_instance = cls.__new__(cls)
|
||||
copied_instance.__dict__.update(self.__dict__)
|
||||
copied_instance.collection = copy.copy(self.collection)
|
||||
|
||||
return copied_instance
|
||||
|
||||
@property
|
||||
def mutable(self):
|
||||
'''Return whether collection is mutable.'''
|
||||
return self.collection.mutable
|
||||
|
||||
@mutable.setter
|
||||
def mutable(self, value):
|
||||
'''Set whether collection is mutable to *value*.'''
|
||||
self.collection.mutable = value
|
||||
|
||||
@property
|
||||
def attribute(self):
|
||||
'''Return attribute bound to.'''
|
||||
return self.collection.attribute
|
||||
|
||||
@attribute.setter
|
||||
def attribute(self, value):
|
||||
'''Set bound attribute to *value*.'''
|
||||
self.collection.attribute = value
|
||||
|
||||
|
||||
class KeyValueMappedCollectionProxy(MappedCollectionProxy):
|
||||
'''A mapped collection of key, value entities.
|
||||
|
||||
Proxy a standard :class:`Collection` as a mapping where certain attributes
|
||||
from the entities in the collection are mapped to key, value pairs.
|
||||
|
||||
For example::
|
||||
|
||||
>>> collection = [Metadata(key='foo', value='bar'), ...]
|
||||
>>> mapped = KeyValueMappedCollectionProxy(
|
||||
... collection, create_metadata,
|
||||
... key_attribute='key', value_attribute='value'
|
||||
... )
|
||||
>>> print mapped['foo']
|
||||
'bar'
|
||||
>>> mapped['bam'] = 'biz'
|
||||
>>> print mapped.collection[-1]
|
||||
Metadata(key='bam', value='biz')
|
||||
|
||||
'''
|
||||
|
||||
def __init__(
|
||||
self, collection, creator, key_attribute, value_attribute
|
||||
):
|
||||
'''Initialise collection.'''
|
||||
self.creator = creator
|
||||
self.key_attribute = key_attribute
|
||||
self.value_attribute = value_attribute
|
||||
super(KeyValueMappedCollectionProxy, self).__init__(collection)
|
||||
|
||||
def _get_entity_by_key(self, key):
|
||||
'''Return entity instance with matching *key* from collection.'''
|
||||
for entity in self.collection:
|
||||
if entity[self.key_attribute] == key:
|
||||
return entity
|
||||
|
||||
raise KeyError(key)
|
||||
|
||||
def __getitem__(self, key):
|
||||
'''Return value for *key*.'''
|
||||
entity = self._get_entity_by_key(key)
|
||||
return entity[self.value_attribute]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
try:
|
||||
entity = self._get_entity_by_key(key)
|
||||
except KeyError:
|
||||
data = {
|
||||
self.key_attribute: key,
|
||||
self.value_attribute: value
|
||||
}
|
||||
entity = self.creator(self, data)
|
||||
|
||||
if (
|
||||
ftrack_api.inspection.state(entity) is
|
||||
ftrack_api.symbol.CREATED
|
||||
):
|
||||
# Persisting this entity will be handled here, record the
|
||||
# operation.
|
||||
self.collection.append(entity)
|
||||
|
||||
else:
|
||||
# The entity is created and persisted separately by the
|
||||
# creator. Do not record this operation.
|
||||
with self.collection.entity.session.operation_recording(False):
|
||||
# Do not record this operation since it will trigger
|
||||
# redundant and potentially failing operations.
|
||||
self.collection.append(entity)
|
||||
|
||||
else:
|
||||
entity[self.value_attribute] = value
|
||||
|
||||
def __delitem__(self, key):
|
||||
'''Remove and delete *key*.
|
||||
|
||||
.. note::
|
||||
|
||||
The associated entity will be deleted as well.
|
||||
|
||||
'''
|
||||
for index, entity in enumerate(self.collection):
|
||||
if entity[self.key_attribute] == key:
|
||||
break
|
||||
else:
|
||||
raise KeyError(key)
|
||||
|
||||
del self.collection[index]
|
||||
entity.session.delete(entity)
|
||||
|
||||
def __iter__(self):
|
||||
'''Iterate over all keys.'''
|
||||
keys = set()
|
||||
for entity in self.collection:
|
||||
keys.add(entity[self.key_attribute])
|
||||
|
||||
return iter(keys)
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of keys.'''
|
||||
keys = set()
|
||||
for entity in self.collection:
|
||||
keys.add(entity[self.key_attribute])
|
||||
|
||||
return len(keys)
|
||||
|
||||
def keys(self):
|
||||
# COMPAT for unit tests..
|
||||
return list(super(
|
||||
KeyValueMappedCollectionProxy, self
|
||||
).keys())
|
||||
|
||||
|
||||
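A hedged sketch of how this proxy is typically reached through an entity's 'metadata' attribute; it assumes an already-connected ftrack_api.Session named session and an existing Task, neither of which is defined in this file.

# Sketch only: 'metadata' on an entity is served by this proxy.
# 'session' is assumed to be a connected ftrack_api.Session.
task = session.query('Task').first()

task['metadata']['review_status'] = 'approved'  # Creates a Metadata entity.
print(task['metadata']['review_status'])

del task['metadata']['review_status']           # Deletes the Metadata entity.
session.commit()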
class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker):
|
||||
'''Generate key for session.'''
|
||||
|
||||
def _key(self, obj):
|
||||
'''Return key for *obj*.'''
|
||||
if isinstance(obj, dict):
|
||||
session = obj.get('session')
|
||||
if session is not None:
|
||||
# Key by session only.
|
||||
return str(id(session))
|
||||
|
||||
return str(obj)
|
||||
|
||||
|
||||
#: Memoiser for use with callables that should be called once per session.
|
||||
memoise_session = ftrack_api.cache.memoise_decorator(
|
||||
ftrack_api.cache.Memoiser(
|
||||
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@memoise_session
|
||||
def _get_custom_attribute_configurations(session):
|
||||
'''Return list of custom attribute configurations.
|
||||
|
||||
The configuration objects will have key, project_id, id and object_type_id
|
||||
populated.
|
||||
|
||||
'''
|
||||
return session.query(
|
||||
'select key, project_id, id, object_type_id, entity_type from '
|
||||
'CustomAttributeConfiguration'
|
||||
).all()
|
||||
|
||||
|
||||
class CustomAttributeCollectionProxy(MappedCollectionProxy):
|
||||
'''A mapped collection of custom attribute value entities.'''
|
||||
|
||||
def __init__(
|
||||
self, collection
|
||||
):
|
||||
'''Initialise collection.'''
|
||||
self.key_attribute = 'configuration_id'
|
||||
self.value_attribute = 'value'
|
||||
super(CustomAttributeCollectionProxy, self).__init__(collection)
|
||||
|
||||
def _get_entity_configurations(self):
|
||||
'''Return all configurations for current collection entity.'''
|
||||
entity = self.collection.entity
|
||||
entity_type = None
|
||||
project_id = None
|
||||
object_type_id = None
|
||||
|
||||
if 'object_type_id' in list(entity.keys()):
|
||||
project_id = entity['project_id']
|
||||
entity_type = 'task'
|
||||
object_type_id = entity['object_type_id']
|
||||
|
||||
if entity.entity_type == 'AssetVersion':
|
||||
project_id = entity['asset']['parent']['project_id']
|
||||
entity_type = 'assetversion'
|
||||
|
||||
if entity.entity_type == 'Asset':
|
||||
project_id = entity['parent']['project_id']
|
||||
entity_type = 'asset'
|
||||
|
||||
if entity.entity_type == 'Project':
|
||||
project_id = entity['id']
|
||||
entity_type = 'show'
|
||||
|
||||
if entity.entity_type == 'User':
|
||||
entity_type = 'user'
|
||||
|
||||
if entity_type is None:
|
||||
raise ValueError(
|
||||
'Entity {!r} not supported.'.format(entity)
|
||||
)
|
||||
|
||||
configurations = []
|
||||
for configuration in _get_custom_attribute_configurations(
|
||||
entity.session
|
||||
):
|
||||
if (
|
||||
configuration['entity_type'] == entity_type and
|
||||
configuration['project_id'] in (project_id, None) and
|
||||
configuration['object_type_id'] == object_type_id
|
||||
):
|
||||
configurations.append(configuration)
|
||||
|
||||
# Return with global configurations at the end of the list. This is done
|
||||
# so that global configurations are shadowed by project-specific ones if the
|
||||
# configurations list is looped when looking for a matching `key`.
|
||||
return sorted(
|
||||
configurations, key=lambda item: item['project_id'] is None
|
||||
)
|
||||
|
||||
def _get_keys(self):
|
||||
'''Return a list of all keys.'''
|
||||
keys = []
|
||||
for configuration in self._get_entity_configurations():
|
||||
keys.append(configuration['key'])
|
||||
|
||||
return keys
|
||||
|
||||
def _get_entity_by_key(self, key):
|
||||
'''Return entity instance with matching *key* from collection.'''
|
||||
configuration_id = self.get_configuration_id_from_key(key)
|
||||
for entity in self.collection:
|
||||
if entity[self.key_attribute] == configuration_id:
|
||||
return entity
|
||||
|
||||
return None
|
||||
|
||||
def get_configuration_id_from_key(self, key):
|
||||
'''Return id of configuration with matching *key*.
|
||||
|
||||
Raise :exc:`KeyError` if no configuration with matching *key* found.
|
||||
|
||||
'''
|
||||
for configuration in self._get_entity_configurations():
|
||||
if key == configuration['key']:
|
||||
return configuration['id']
|
||||
|
||||
raise KeyError(key)
|
||||
|
||||
def __getitem__(self, key):
|
||||
'''Return value for *key*.'''
|
||||
entity = self._get_entity_by_key(key)
|
||||
|
||||
if entity:
|
||||
return entity[self.value_attribute]
|
||||
|
||||
for configuration in self._get_entity_configurations():
|
||||
if configuration['key'] == key:
|
||||
return configuration['default']
|
||||
|
||||
raise KeyError(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
custom_attribute_value = self._get_entity_by_key(key)
|
||||
|
||||
if custom_attribute_value:
|
||||
custom_attribute_value[self.value_attribute] = value
|
||||
else:
|
||||
entity = self.collection.entity
|
||||
session = entity.session
|
||||
data = {
|
||||
self.key_attribute: self.get_configuration_id_from_key(key),
|
||||
self.value_attribute: value,
|
||||
'entity_id': entity['id']
|
||||
}
|
||||
|
||||
# Make sure to use the currently active collection. This is
|
||||
# necessary since a merge might have replaced the current one.
|
||||
self.collection.entity['custom_attributes'].collection.append(
|
||||
session.create('CustomAttributeValue', data)
|
||||
)
|
||||
|
||||
def __delitem__(self, key):
|
||||
'''Remove and delete *key*.
|
||||
|
||||
.. note::
|
||||
|
||||
The associated entity will be deleted as well.
|
||||
|
||||
'''
|
||||
custom_attribute_value = self._get_entity_by_key(key)
|
||||
|
||||
if custom_attribute_value:
|
||||
index = self.collection.index(custom_attribute_value)
|
||||
del self.collection[index]
|
||||
|
||||
custom_attribute_value.session.delete(custom_attribute_value)
|
||||
else:
|
||||
self.logger.warning(L(
|
||||
'Cannot delete {0!r} on {1!r}, no custom attribute value set.',
|
||||
key, self.collection.entity
|
||||
))
|
||||
|
||||
def __eq__(self, collection):
|
||||
'''Return True if *collection* equals proxy collection.'''
|
||||
if collection is ftrack_api.symbol.NOT_SET:
|
||||
return False
|
||||
|
||||
return collection.collection == self.collection
|
||||
|
||||
def __iter__(self):
|
||||
'''Iterate over all keys.'''
|
||||
keys = self._get_keys()
|
||||
return iter(keys)
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of keys.'''
|
||||
keys = self._get_keys()
|
||||
return len(keys)
|
||||
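A hedged sketch of reading and writing through CustomAttributeCollectionProxy via an entity's 'custom_attributes' attribute; the session, the entity and the 'fstart' configuration key are assumptions for illustration only.

# Sketch only: 'custom_attributes' on an entity is served by this proxy.
# 'session' and the 'fstart' configuration are assumed to exist.
shot = session.query('Shot').first()

print(list(shot['custom_attributes'].keys()))  # Keys come from configurations.
shot['custom_attributes']['fstart'] = 1001     # Creates a CustomAttributeValue.
session.commit()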
pype/vendor/ftrack_api/data.py (vendored, new file, 133 lines)
@@ -0,0 +1,133 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 ftrack
|
||||
|
||||
from builtins import object
|
||||
import os
|
||||
from abc import ABCMeta, abstractmethod
|
||||
import tempfile
|
||||
from future.utils import with_metaclass
|
||||
|
||||
|
||||
class Data(with_metaclass(ABCMeta, object)):
|
||||
'''File-like object for manipulating data.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise data access.'''
|
||||
self.closed = False
|
||||
|
||||
@abstractmethod
|
||||
def read(self, limit=None):
|
||||
'''Return content from current position up to *limit*.'''
|
||||
|
||||
@abstractmethod
|
||||
def write(self, content):
|
||||
'''Write content at current position.'''
|
||||
|
||||
def flush(self):
|
||||
'''Flush buffers ensuring data written.'''
|
||||
|
||||
def seek(self, offset, whence=os.SEEK_SET):
|
||||
'''Move internal pointer by *offset*.
|
||||
|
||||
The *whence* argument is optional and defaults to os.SEEK_SET or 0
|
||||
(absolute file positioning); other values are os.SEEK_CUR or 1
|
||||
(seek relative to the current position) and os.SEEK_END or 2
|
||||
(seek relative to the file's end).
|
||||
|
||||
'''
|
||||
raise NotImplementedError('Seek not supported.')
|
||||
|
||||
def tell(self):
|
||||
'''Return current position of internal pointer.'''
|
||||
raise NotImplementedError('Tell not supported.')
|
||||
|
||||
def close(self):
|
||||
'''Flush buffers and prevent further access.'''
|
||||
self.flush()
|
||||
self.closed = True
|
||||
|
||||
|
||||
class FileWrapper(Data):
|
||||
'''Data wrapper for Python file objects.'''
|
||||
|
||||
def __init__(self, wrapped_file):
|
||||
'''Initialise access to *wrapped_file*.'''
|
||||
self.wrapped_file = wrapped_file
|
||||
self._read_since_last_write = False
|
||||
super(FileWrapper, self).__init__()
|
||||
|
||||
def read(self, limit=None):
|
||||
'''Return content from current position up to *limit*.'''
|
||||
self._read_since_last_write = True
|
||||
|
||||
if limit is None:
|
||||
limit = -1
|
||||
|
||||
return self.wrapped_file.read(limit)
|
||||
|
||||
def write(self, content):
|
||||
'''Write content at current position.'''
|
||||
if self._read_since_last_write:
|
||||
# Windows requires a seek before switching from read to write.
|
||||
self.seek(self.tell())
|
||||
|
||||
self.wrapped_file.write(content)
|
||||
self._read_since_last_write = False
|
||||
|
||||
def flush(self):
|
||||
'''Flush buffers ensuring data written.'''
|
||||
super(FileWrapper, self).flush()
|
||||
if hasattr(self.wrapped_file, 'flush'):
|
||||
self.wrapped_file.flush()
|
||||
|
||||
def seek(self, offset, whence=os.SEEK_SET):
|
||||
'''Move internal pointer by *offset*.'''
|
||||
self.wrapped_file.seek(offset, whence)
|
||||
|
||||
def tell(self):
|
||||
'''Return current position of internal pointer.'''
|
||||
return self.wrapped_file.tell()
|
||||
|
||||
def close(self):
|
||||
'''Flush buffers and prevent further access.'''
|
||||
if not self.closed:
|
||||
super(FileWrapper, self).close()
|
||||
if hasattr(self.wrapped_file, 'close'):
|
||||
self.wrapped_file.close()
|
||||
|
||||
|
||||
class File(FileWrapper):
|
||||
'''Data wrapper accepting filepath.'''
|
||||
|
||||
def __init__(self, path, mode='rb'):
|
||||
'''Open file at *path* with *mode*.'''
|
||||
file_object = open(path, mode)
|
||||
super(File, self).__init__(file_object)
|
||||
|
||||
|
||||
class String(FileWrapper):
|
||||
'''Data wrapper using TemporaryFile instance.'''
|
||||
|
||||
def __init__(self, content=None):
|
||||
'''Initialise data with *content*.'''
|
||||
super(String, self).__init__(
|
||||
tempfile.TemporaryFile()
|
||||
)
|
||||
|
||||
if content is not None:
|
||||
self.wrapped_file.write(content.encode())
|
||||
self.wrapped_file.seek(0)
|
||||
|
||||
|
||||
def write(self, content):
|
||||
if not isinstance(content, bytes):
|
||||
content = content.encode()
|
||||
|
||||
super(String, self).write(
|
||||
content
|
||||
)
|
||||
|
||||
def read(self, limit=None):
|
||||
return super(String, self).read(
|
||||
limit
|
||||
).decode('utf-8')
|
||||
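A small runnable sketch of the String wrapper above; it behaves like a file object backed by a temporary file and encodes and decodes text transparently. The import assumes the vendored package is importable.

# Sketch only: String wraps a TemporaryFile and round-trips text.
from ftrack_api.data import String

data = String('hello world')
print(data.read())        # 'hello world'

data.seek(0)
data.write('hello ftrack')
data.seek(0)
print(data.read())        # 'hello ftrack'
data.close()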
pype/vendor/ftrack_api/entity/__init__.py (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
pype/vendor/ftrack_api/entity/asset_version.py (vendored, new file, 91 lines)
@@ -0,0 +1,91 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class AssetVersion(ftrack_api.entity.base.Entity):
|
||||
'''Represent asset version.'''
|
||||
|
||||
def create_component(
|
||||
self, path, data=None, location=None
|
||||
):
|
||||
'''Create a new component from *path* with additional *data*
|
||||
|
||||
.. note::
|
||||
|
||||
This is a helper method. To create components manually use the
|
||||
standard :meth:`Session.create` method.
|
||||
|
||||
*path* can be a string representing a filesystem path to the data to
|
||||
use for the component. The *path* can also be specified as a sequence
|
||||
string, in which case a sequence component with child components for
|
||||
each item in the sequence will be created automatically. The accepted
|
||||
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
|
||||
example::
|
||||
|
||||
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
|
||||
|
||||
.. seealso::
|
||||
|
||||
`Clique documentation <http://clique.readthedocs.org>`_
|
||||
|
||||
*data* should be a dictionary of any additional data to construct the
|
||||
component with (as passed to :meth:`Session.create`). This version is
|
||||
automatically set as the component's version.
|
||||
|
||||
If *location* is specified then automatically add component to that
|
||||
location.
|
||||
|
||||
'''
|
||||
if data is None:
|
||||
data = {}
|
||||
|
||||
data.pop('version_id', None)
|
||||
data['version'] = self
|
||||
|
||||
return self.session.create_component(path, data=data, location=location)
|
||||
|
||||
def encode_media(self, media, keep_original='auto'):
|
||||
'''Return a new Job that encode *media* to make it playable in browsers.
|
||||
|
||||
*media* can be a path to a file or a FileComponent in the ftrack.server
|
||||
location.
|
||||
|
||||
The job will encode *media* based on the file type, and the job data contains
|
||||
information about encoding in the following format::
|
||||
|
||||
{
|
||||
'output': [{
|
||||
'format': 'video/mp4',
|
||||
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
|
||||
}, {
|
||||
'format': 'image/jpeg',
|
||||
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
|
||||
}],
|
||||
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
|
||||
'keep_original': True
|
||||
}
|
||||
|
||||
The output components are associated with the job via the job_components
|
||||
relation.
|
||||
|
||||
An image component will always be generated if possible, and will be
|
||||
set as the version's thumbnail.
|
||||
|
||||
The new components will automatically be associated with the version.
|
||||
A server version of 3.3.32 or higher is required for this to function
|
||||
properly.
|
||||
|
||||
If *media* is a file path, a new source component will be created and
|
||||
added to the ftrack server location and a call to :meth:`commit` will be
|
||||
issued. If *media* is a FileComponent, it will be assumed to be
|
||||
available in the ftrack.server location.
|
||||
|
||||
If *keep_original* is not set, the original media will be kept if it
|
||||
is a FileComponent, and deleted if it is a file path. You can specify
|
||||
True or False to change this behavior.
|
||||
'''
|
||||
return self.session.encode_media(
|
||||
media, version_id=self['id'], keep_original=keep_original
|
||||
)
|
||||
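A hedged sketch of the two helper methods above on an existing version, using the sequence path format described in the docstring; the session, the paths and the component name are illustrative assumptions.

# Sketch only: helper methods on an existing version.
# 'session', the paths and the component name are assumed.
version = session.query('AssetVersion').first()

version.create_component(
    '/renders/shot010/beauty.%04d.exr [1001-1100]',  # Sequence path format.
    data={'name': 'beauty'}
)

# Encode a movie so it becomes playable in the web interface.
job = version.encode_media('/renders/shot010/preview.mov')
session.commit()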
pype/vendor/ftrack_api/entity/base.py (vendored, new file, 407 lines)
@@ -0,0 +1,407 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from builtins import str
|
||||
import abc
|
||||
import collections
|
||||
import logging
|
||||
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.attribute
|
||||
import ftrack_api.inspection
|
||||
import ftrack_api.exception
|
||||
import ftrack_api.operation
|
||||
from ftrack_api.logging import LazyLogMessage as L
|
||||
from future.utils import with_metaclass
|
||||
|
||||
|
||||
class _EntityBase(object):
|
||||
'''Base class to allow for mixins; we need a common base.'''
|
||||
pass
|
||||
|
||||
|
||||
class DynamicEntityTypeMetaclass(abc.ABCMeta):
|
||||
'''Custom metaclass to customise representation of dynamic classes.
|
||||
|
||||
.. note::
|
||||
|
||||
Derive from same metaclass as derived bases to avoid conflicts.
|
||||
|
||||
'''
|
||||
def __repr__(self):
|
||||
'''Return representation of class.'''
|
||||
return '<dynamic ftrack class \'{0}\'>'.format(self.__name__)
|
||||
|
||||
|
||||
class Entity(with_metaclass(DynamicEntityTypeMetaclass, _EntityBase, collections.MutableMapping)):
|
||||
'''Base class for all entities.'''
|
||||
|
||||
entity_type = 'Entity'
|
||||
attributes = None
|
||||
primary_key_attributes = None
|
||||
default_projections = None
|
||||
|
||||
def __init__(self, session, data=None, reconstructing=False):
|
||||
'''Initialise entity.
|
||||
|
||||
*session* is an instance of :class:`ftrack_api.session.Session` that
|
||||
this entity instance is bound to.
|
||||
|
||||
*data* is a mapping of key, value pairs to apply as initial attribute
|
||||
values.
|
||||
|
||||
*reconstructing* indicates whether this entity is being reconstructed,
|
||||
such as from a query, and therefore should not have any special creation
|
||||
logic applied, such as initialising defaults for missing data.
|
||||
|
||||
'''
|
||||
super(Entity, self).__init__()
|
||||
self.logger = logging.getLogger(
|
||||
__name__ + '.' + self.__class__.__name__
|
||||
)
|
||||
self.session = session
|
||||
self._inflated = set()
|
||||
|
||||
if data is None:
|
||||
data = {}
|
||||
|
||||
self.logger.debug(L(
|
||||
'{0} entity from {1!r}.',
|
||||
('Reconstructing' if reconstructing else 'Constructing'), data
|
||||
))
|
||||
|
||||
self._ignore_data_keys = ['__entity_type__']
|
||||
if not reconstructing:
|
||||
self._construct(data)
|
||||
else:
|
||||
self._reconstruct(data)
|
||||
|
||||
def _construct(self, data):
|
||||
'''Construct from *data*.'''
|
||||
# Suspend operation recording so that all modifications can be applied
|
||||
# in single create operation. In addition, recording a modification
|
||||
# operation requires a primary key which may not be available yet.
|
||||
|
||||
relational_attributes = dict()
|
||||
|
||||
with self.session.operation_recording(False):
|
||||
# Set defaults for any unset local attributes.
|
||||
for attribute in self.__class__.attributes:
|
||||
if attribute.name not in data:
|
||||
default_value = attribute.default_value
|
||||
if callable(default_value):
|
||||
default_value = default_value(self)
|
||||
|
||||
attribute.set_local_value(self, default_value)
|
||||
|
||||
|
||||
# Data represents locally set values.
|
||||
for key, value in list(data.items()):
|
||||
if key in self._ignore_data_keys:
|
||||
continue
|
||||
|
||||
attribute = self.__class__.attributes.get(key)
|
||||
if attribute is None:
|
||||
self.logger.debug(L(
|
||||
'Cannot populate {0!r} attribute as no such '
|
||||
'attribute found on entity {1!r}.', key, self
|
||||
))
|
||||
continue
|
||||
|
||||
if not isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
|
||||
relational_attributes.setdefault(
|
||||
attribute, value
|
||||
)
|
||||
|
||||
else:
|
||||
attribute.set_local_value(self, value)
|
||||
|
||||
# Record create operation.
|
||||
# Note: As this operation is recorded *before* any Session.merge takes
|
||||
# place there is the possibility that the operation will hold references
|
||||
# to outdated data in entity_data. However, this would be unusual in
|
||||
# that it would mean the same new entity was created twice and only one
|
||||
# altered. Conversely, if this operation were recorded *after*
|
||||
# Session.merge took place, any cache would not be able to determine
|
||||
# the status of the entity, which could be important if the cache should
|
||||
# not store newly created entities that have not yet been persisted. Out
|
||||
# of these two 'evils' this approach is deemed the lesser at this time.
|
||||
# A third, more involved, approach to satisfy both might be to record
|
||||
# the operation with a PENDING entity_data value and then update with
|
||||
# merged values post merge.
|
||||
if self.session.record_operations:
|
||||
entity_data = {}
|
||||
|
||||
# Lower level API used here to avoid including any empty
|
||||
# collections that are automatically generated on access.
|
||||
for attribute in self.attributes:
|
||||
value = attribute.get_local_value(self)
|
||||
if value is not ftrack_api.symbol.NOT_SET:
|
||||
entity_data[attribute.name] = value
|
||||
|
||||
self.session.recorded_operations.push(
|
||||
ftrack_api.operation.CreateEntityOperation(
|
||||
self.entity_type,
|
||||
ftrack_api.inspection.primary_key(self),
|
||||
entity_data
|
||||
)
|
||||
)
|
||||
|
||||
for attribute, value in list(relational_attributes.items()):
|
||||
# Finally we set values for "relational" attributes, we need
|
||||
# to do this at the end in order to get the create operations
|
||||
# in the correct order as the newly created attributes might
|
||||
# contain references to the newly created entity.
|
||||
|
||||
attribute.set_local_value(
|
||||
self, value
|
||||
)
|
||||
|
||||
def _reconstruct(self, data):
|
||||
'''Reconstruct from *data*.'''
|
||||
# Data represents remote values.
|
||||
for key, value in list(data.items()):
|
||||
if key in self._ignore_data_keys:
|
||||
continue
|
||||
|
||||
attribute = self.__class__.attributes.get(key)
|
||||
if attribute is None:
|
||||
self.logger.debug(L(
|
||||
'Cannot populate {0!r} attribute as no such attribute '
|
||||
'found on entity {1!r}.', key, self
|
||||
))
|
||||
continue
|
||||
|
||||
attribute.set_remote_value(self, value)
|
||||
|
||||
def __repr__(self):
|
||||
'''Return representation of instance.'''
|
||||
return '<dynamic ftrack {0} object {1}>'.format(
|
||||
self.__class__.__name__, id(self)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation of instance.'''
|
||||
with self.session.auto_populating(False):
|
||||
primary_key = ['Unknown']
|
||||
try:
|
||||
primary_key = list(ftrack_api.inspection.primary_key(self).values())
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return '<{0}({1})>'.format(
|
||||
self.__class__.__name__, ', '.join(primary_key)
|
||||
)
|
||||
|
||||
def __hash__(self):
|
||||
'''Return hash representing instance.'''
|
||||
return hash(str(ftrack_api.inspection.identity(self)))
|
||||
|
||||
def __eq__(self, other):
|
||||
'''Return whether *other* is equal to this instance.
|
||||
|
||||
.. note::
|
||||
|
||||
Equality is determined by both instances having the same identity.
|
||||
Values of attributes are not considered.
|
||||
|
||||
'''
|
||||
try:
|
||||
return (
|
||||
ftrack_api.inspection.identity(other)
|
||||
== ftrack_api.inspection.identity(self)
|
||||
)
|
||||
except (AttributeError, KeyError):
|
||||
return False
|
||||
|
||||
def __getitem__(self, key):
|
||||
'''Return attribute value for *key*.'''
|
||||
attribute = self.__class__.attributes.get(key)
|
||||
if attribute is None:
|
||||
raise KeyError(key)
|
||||
|
||||
return attribute.get_value(self)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
'''Set attribute *value* for *key*.'''
|
||||
attribute = self.__class__.attributes.get(key)
|
||||
if attribute is None:
|
||||
raise KeyError(key)
|
||||
|
||||
attribute.set_local_value(self, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
'''Clear attribute value for *key*.
|
||||
|
||||
.. note::
|
||||
|
||||
Will not remove the attribute, but instead clear any local value
|
||||
and revert to the last known server value.
|
||||
|
||||
'''
|
||||
attribute = self.__class__.attributes.get(key)
|
||||
attribute.set_local_value(self, ftrack_api.symbol.NOT_SET)
|
||||
|
||||
def __iter__(self):
|
||||
'''Iterate over all attributes keys.'''
|
||||
for attribute in self.__class__.attributes:
|
||||
yield attribute.name
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of attributes.'''
|
||||
return len(self.__class__.attributes)
|
||||
|
||||
def values(self):
|
||||
'''Return list of values.'''
|
||||
if self.session.auto_populate:
|
||||
self._populate_unset_scalar_attributes()
|
||||
|
||||
return list(super(Entity, self).values())
|
||||
|
||||
def items(self):
|
||||
'''Return list of tuples of (key, value) pairs.
|
||||
|
||||
.. note::
|
||||
|
||||
Will fetch all values from the server if not already fetched or set
|
||||
locally.
|
||||
|
||||
'''
|
||||
if self.session.auto_populate:
|
||||
self._populate_unset_scalar_attributes()
|
||||
|
||||
return list(super(Entity, self).items())
|
||||
|
||||
def clear(self):
|
||||
'''Reset all locally modified attribute values.'''
|
||||
for attribute in self:
|
||||
del self[attribute]
|
||||
|
||||
def merge(self, entity, merged=None):
|
||||
'''Merge *entity* attribute values and other data into this entity.
|
||||
|
||||
Only merge values from *entity* that are not
|
||||
:attr:`ftrack_api.symbol.NOT_SET`.
|
||||
|
||||
Return a list of changes made with each change being a mapping with
|
||||
the keys:
|
||||
|
||||
* type - Either 'remote_attribute', 'local_attribute' or 'property'.
|
||||
* name - The name of the attribute / property modified.
|
||||
* old_value - The previous value.
|
||||
* new_value - The new merged value.
|
||||
|
||||
'''
|
||||
log_debug = self.logger.isEnabledFor(logging.DEBUG)
|
||||
|
||||
if merged is None:
|
||||
merged = {}
|
||||
|
||||
log_message = 'Merged {type} "{name}": {old_value!r} -> {new_value!r}'
|
||||
changes = []
|
||||
|
||||
# Attributes.
|
||||
|
||||
# Prioritise by type so that scalar values are set first. This should
|
||||
# guarantee that the attributes making up the identity of the entity
|
||||
# are merged before merging any collections that may have references to
|
||||
# this entity.
|
||||
attributes = collections.deque()
|
||||
for attribute in entity.attributes:
|
||||
if isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
|
||||
attributes.appendleft(attribute)
|
||||
else:
|
||||
attributes.append(attribute)
|
||||
|
||||
for other_attribute in attributes:
|
||||
attribute = self.attributes.get(other_attribute.name)
|
||||
|
||||
# Local attributes.
|
||||
other_local_value = other_attribute.get_local_value(entity)
|
||||
if other_local_value is not ftrack_api.symbol.NOT_SET:
|
||||
local_value = attribute.get_local_value(self)
|
||||
if local_value != other_local_value:
|
||||
merged_local_value = self.session.merge(
|
||||
other_local_value, merged=merged
|
||||
)
|
||||
|
||||
attribute.set_local_value(self, merged_local_value)
|
||||
changes.append({
|
||||
'type': 'local_attribute',
|
||||
'name': attribute.name,
|
||||
'old_value': local_value,
|
||||
'new_value': merged_local_value
|
||||
})
|
||||
log_debug and self.logger.debug(
|
||||
log_message.format(**changes[-1])
|
||||
)
|
||||
|
||||
# Remote attributes.
|
||||
other_remote_value = other_attribute.get_remote_value(entity)
|
||||
if other_remote_value is not ftrack_api.symbol.NOT_SET:
|
||||
remote_value = attribute.get_remote_value(self)
|
||||
if remote_value != other_remote_value:
|
||||
merged_remote_value = self.session.merge(
|
||||
other_remote_value, merged=merged
|
||||
)
|
||||
|
||||
attribute.set_remote_value(
|
||||
self, merged_remote_value
|
||||
)
|
||||
|
||||
changes.append({
|
||||
'type': 'remote_attribute',
|
||||
'name': attribute.name,
|
||||
'old_value': remote_value,
|
||||
'new_value': merged_remote_value
|
||||
})
|
||||
|
||||
log_debug and self.logger.debug(
|
||||
log_message.format(**changes[-1])
|
||||
)
|
||||
|
||||
# We need to handle collections separately since
|
||||
# they may store a local copy of the remote attribute
|
||||
# even though it may not be modified.
|
||||
if not isinstance(
|
||||
attribute, ftrack_api.attribute.AbstractCollectionAttribute
|
||||
):
|
||||
continue
|
||||
|
||||
local_value = attribute.get_local_value(
|
||||
self
|
||||
)
|
||||
|
||||
# Populated but not modified, update it.
|
||||
if (
|
||||
local_value is not ftrack_api.symbol.NOT_SET and
|
||||
local_value == remote_value
|
||||
):
|
||||
attribute.set_local_value(
|
||||
self, merged_remote_value
|
||||
)
|
||||
changes.append({
|
||||
'type': 'local_attribute',
|
||||
'name': attribute.name,
|
||||
'old_value': local_value,
|
||||
'new_value': merged_remote_value
|
||||
})
|
||||
|
||||
log_debug and self.logger.debug(
|
||||
log_message.format(**changes[-1])
|
||||
)
|
||||
|
||||
return changes
|
||||
|
||||
def _populate_unset_scalar_attributes(self):
|
||||
'''Populate all unset scalar attributes in one query.'''
|
||||
projections = []
|
||||
for attribute in self.attributes:
|
||||
if isinstance(attribute, ftrack_api.attribute.ScalarAttribute):
|
||||
if attribute.get_remote_value(self) is ftrack_api.symbol.NOT_SET:
|
||||
projections.append(attribute.name)
|
||||
|
||||
if projections:
|
||||
self.session.populate([self], ', '.join(projections))
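# Illustrative usage sketch, not part of the vendored source: the same
# batched population can be requested explicitly through
# ``Session.populate``, passing a comma separated projection string. The
# attribute names below are assumptions for illustration.
#
#     >>> session.populate([task], 'name, description')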
|
||||
75  pype/vendor/ftrack_api/entity/component.py  vendored  Normal file
@@ -0,0 +1,75 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import object
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class Component(ftrack_api.entity.base.Entity):
|
||||
'''Represent a component.'''
|
||||
|
||||
def get_availability(self, locations=None):
|
||||
'''Return availability in *locations*.
|
||||
|
||||
If *locations* is None, all known locations will be checked.
|
||||
|
||||
Return a dictionary of {location_id:percentage_availability}
|
||||
|
||||
'''
|
||||
return self.session.get_component_availability(
|
||||
self, locations=locations
|
||||
)
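# Illustrative usage sketch, not part of the vendored source: the returned
# mapping is keyed by location id with a percentage availability per
# location. The query below is an assumption for illustration.
#
#     >>> component = session.query('Component').first()
#     >>> availability = component.get_availability()
#     >>> for location_id, percentage in availability.items():
#     ...     print(location_id, percentage)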
|
||||
|
||||
|
||||
class CreateThumbnailMixin(object):
|
||||
'''Mixin to add create_thumbnail method on entity class.'''
|
||||
|
||||
def create_thumbnail(self, path, data=None):
|
||||
'''Set entity thumbnail from *path*.
|
||||
|
||||
Creates a thumbnail component in the ftrack.server location using
:meth:`Session.create_component
<ftrack_api.session.Session.create_component>`. The thumbnail component
will be created using *data* if specified. If no component name is
given, `thumbnail` will be used.
|
||||
|
||||
The file is expected to be of an appropriate size and valid file
|
||||
type.
|
||||
|
||||
.. note::
|
||||
|
||||
A :meth:`Session.commit<ftrack_api.session.Session.commit>` will be
|
||||
automatically issued.
|
||||
|
||||
'''
|
||||
if data is None:
|
||||
data = {}
|
||||
if not data.get('name'):
|
||||
data['name'] = 'thumbnail'
|
||||
|
||||
thumbnail_component = self.session.create_component(
|
||||
path, data, location=None
|
||||
)
|
||||
|
||||
origin_location = self.session.get(
|
||||
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
|
||||
)
|
||||
server_location = self.session.get(
|
||||
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
|
||||
)
|
||||
server_location.add_component(thumbnail_component, [origin_location])
|
||||
|
||||
# TODO: This commit can be avoided by reordering the operations in
|
||||
# this method so that the component is transferred to ftrack.server
|
||||
# after the thumbnail has been set.
|
||||
#
|
||||
# There is currently a bug in the API backend, causing the operations
|
||||
# to *some* times be ordered wrongly, where the update occurs before
|
||||
# the component has been created, causing an integrity error.
|
||||
#
|
||||
# Once this issue has been resolved, this commit can be removed and
# the update placed between component creation and registration.
|
||||
self['thumbnail_id'] = thumbnail_component['id']
|
||||
self.session.commit()
|
||||
|
||||
return thumbnail_component
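# Illustrative usage sketch, not part of the vendored source: any entity
# exposing a ``thumbnail_id`` property gains this mixin, so a thumbnail
# can be set directly from a file on disk. The path is an assumption.
#
#     >>> task = session.query('Task').first()
#     >>> task.create_thumbnail('/tmp/preview.jpg')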
|
||||
439  pype/vendor/ftrack_api/entity/factory.py  vendored  Normal file
@@ -0,0 +1,439 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
||||
from builtins import str
|
||||
from builtins import object
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
import functools
|
||||
|
||||
import ftrack_api.attribute
|
||||
import ftrack_api.entity.base
|
||||
import ftrack_api.entity.location
|
||||
import ftrack_api.entity.component
|
||||
import ftrack_api.entity.asset_version
|
||||
import ftrack_api.entity.project_schema
|
||||
import ftrack_api.entity.note
|
||||
import ftrack_api.entity.job
|
||||
import ftrack_api.entity.user
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.cache
|
||||
from ftrack_api.logging import LazyLogMessage as L
|
||||
|
||||
|
||||
class Factory(object):
|
||||
'''Entity class factory.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise factory.'''
|
||||
super(Factory, self).__init__()
|
||||
self.logger = logging.getLogger(
|
||||
__name__ + '.' + self.__class__.__name__
|
||||
)
|
||||
|
||||
def create(self, schema, bases=None):
|
||||
'''Create and return entity class from *schema*.
|
||||
|
||||
*bases* should be a list of bases to give the constructed class. If not
|
||||
specified, default to :class:`ftrack_api.entity.base.Entity`.
|
||||
|
||||
'''
|
||||
entity_type = schema['id']
|
||||
class_name = entity_type
|
||||
|
||||
class_bases = bases
|
||||
if class_bases is None:
|
||||
class_bases = [ftrack_api.entity.base.Entity]
|
||||
|
||||
class_namespace = dict()
|
||||
|
||||
# Build attributes for class.
|
||||
attributes = ftrack_api.attribute.Attributes()
|
||||
immutable = schema.get('immutable', [])
|
||||
for name, fragment in list(schema.get('properties', {}).items()):
|
||||
mutable = name not in immutable
|
||||
|
||||
default = fragment.get('default', ftrack_api.symbol.NOT_SET)
|
||||
if default == '{uid}':
|
||||
default = lambda instance: str(uuid.uuid4())
|
||||
|
||||
data_type = fragment.get('type', ftrack_api.symbol.NOT_SET)
|
||||
|
||||
if data_type is not ftrack_api.symbol.NOT_SET:
|
||||
|
||||
if data_type in (
|
||||
'string', 'boolean', 'integer', 'number', 'variable'
|
||||
):
|
||||
# Basic scalar attribute.
|
||||
if data_type == 'number':
|
||||
data_type = 'float'
|
||||
|
||||
if data_type == 'string':
|
||||
data_format = fragment.get('format')
|
||||
if data_format == 'date-time':
|
||||
data_type = 'datetime'
|
||||
|
||||
attribute = self.create_scalar_attribute(
|
||||
class_name, name, mutable, default, data_type
|
||||
)
|
||||
if attribute:
|
||||
attributes.add(attribute)
|
||||
|
||||
elif data_type == 'array':
|
||||
attribute = self.create_collection_attribute(
|
||||
class_name, name, mutable
|
||||
)
|
||||
if attribute:
|
||||
attributes.add(attribute)
|
||||
|
||||
elif data_type == 'mapped_array':
|
||||
reference = fragment.get('items', {}).get('$ref')
|
||||
if not reference:
|
||||
self.logger.debug(L(
|
||||
'Skipping {0}.{1} mapped_array attribute that does '
|
||||
'not define a schema reference.', class_name, name
|
||||
))
|
||||
continue
|
||||
|
||||
attribute = self.create_mapped_collection_attribute(
|
||||
class_name, name, mutable, reference
|
||||
)
|
||||
if attribute:
|
||||
attributes.add(attribute)
|
||||
|
||||
else:
|
||||
self.logger.debug(L(
|
||||
'Skipping {0}.{1} attribute with unrecognised data '
|
||||
'type {2}', class_name, name, data_type
|
||||
))
|
||||
else:
|
||||
# Reference attribute.
|
||||
reference = fragment.get('$ref', ftrack_api.symbol.NOT_SET)
|
||||
if reference is ftrack_api.symbol.NOT_SET:
|
||||
self.logger.debug(L(
|
||||
'Skipping {0}.{1} mapped_array attribute that does '
|
||||
'not define a schema reference.', class_name, name
|
||||
))
|
||||
continue
|
||||
|
||||
attribute = self.create_reference_attribute(
|
||||
class_name, name, mutable, reference
|
||||
)
|
||||
if attribute:
|
||||
attributes.add(attribute)
|
||||
|
||||
default_projections = schema.get('default_projections', [])
|
||||
|
||||
# Construct class.
|
||||
class_namespace['entity_type'] = entity_type
|
||||
class_namespace['attributes'] = attributes
|
||||
class_namespace['primary_key_attributes'] = schema['primary_key'][:]
|
||||
class_namespace['default_projections'] = default_projections
|
||||
|
||||
from future.utils import (
|
||||
native_str
|
||||
)
|
||||
|
||||
cls = type(
|
||||
native_str(class_name), # type doesn't accept unicode.
|
||||
tuple(class_bases),
|
||||
class_namespace
|
||||
)
|
||||
|
||||
return cls
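# Illustrative usage sketch, not part of the vendored source: a minimal
# schema mapping is enough to build an entity class; the schema content
# below is an assumption for illustration.
#
#     >>> factory = Factory()
#     >>> schema = {
#     ...     'id': 'Example',
#     ...     'primary_key': ['id'],
#     ...     'properties': {'id': {'type': 'string'}}
#     ... }
#     >>> ExampleClass = factory.create(schema)
#     >>> ExampleClass.entity_type
#     'Example'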
|
||||
|
||||
def create_scalar_attribute(
|
||||
self, class_name, name, mutable, default, data_type
|
||||
):
|
||||
'''Return appropriate scalar attribute instance.'''
|
||||
return ftrack_api.attribute.ScalarAttribute(
|
||||
name, data_type=data_type, default_value=default, mutable=mutable
|
||||
)
|
||||
|
||||
def create_reference_attribute(self, class_name, name, mutable, reference):
|
||||
'''Return appropriate reference attribute instance.'''
|
||||
return ftrack_api.attribute.ReferenceAttribute(
|
||||
name, reference, mutable=mutable
|
||||
)
|
||||
|
||||
def create_collection_attribute(self, class_name, name, mutable):
|
||||
'''Return appropriate collection attribute instance.'''
|
||||
return ftrack_api.attribute.CollectionAttribute(
|
||||
name, mutable=mutable
|
||||
)
|
||||
|
||||
def create_mapped_collection_attribute(
|
||||
self, class_name, name, mutable, reference
|
||||
):
|
||||
'''Return appropriate mapped collection attribute instance.'''
|
||||
self.logger.debug(L(
|
||||
'Skipping {0}.{1} mapped_array attribute that has '
|
||||
'no implementation defined for reference {2}.',
|
||||
class_name, name, reference
|
||||
))
|
||||
|
||||
|
||||
class PerSessionDefaultKeyMaker(ftrack_api.cache.KeyMaker):
|
||||
'''Generate key for defaults.'''
|
||||
|
||||
def _key(self, obj):
|
||||
'''Return key for *obj*.'''
|
||||
if isinstance(obj, dict):
|
||||
entity = obj.get('entity')
|
||||
if entity is not None:
|
||||
# Key by session only.
|
||||
return str(id(entity.session))
|
||||
|
||||
return str(obj)
|
||||
|
||||
|
||||
#: Memoiser for use with default callables that should only be called once per
|
||||
# session.
|
||||
memoise_defaults = ftrack_api.cache.memoise_decorator(
|
||||
ftrack_api.cache.Memoiser(
|
||||
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
|
||||
)
|
||||
)
|
||||
|
||||
#: Memoiser for use with callables that should be called once per session.
|
||||
memoise_session = ftrack_api.cache.memoise_decorator(
|
||||
ftrack_api.cache.Memoiser(
|
||||
key_maker=PerSessionDefaultKeyMaker(), return_copies=False
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@memoise_session
|
||||
def _get_custom_attribute_configurations(session):
|
||||
'''Return list of custom attribute configurations.
|
||||
|
||||
The configuration objects will have key, project_id, id and object_type_id
|
||||
populated.
|
||||
|
||||
'''
|
||||
return session.query(
|
||||
'select key, project_id, id, object_type_id, entity_type, '
|
||||
'is_hierarchical from CustomAttributeConfiguration'
|
||||
).all()
|
||||
|
||||
|
||||
def _get_entity_configurations(entity):
|
||||
'''Return all configurations for current collection entity.'''
|
||||
entity_type = None
|
||||
project_id = None
|
||||
object_type_id = None
|
||||
|
||||
if 'object_type_id' in list(entity.keys()):
|
||||
project_id = entity['project_id']
|
||||
entity_type = 'task'
|
||||
object_type_id = entity['object_type_id']
|
||||
|
||||
if entity.entity_type == 'AssetVersion':
|
||||
project_id = entity['asset']['parent']['project_id']
|
||||
entity_type = 'assetversion'
|
||||
|
||||
if entity.entity_type == 'Project':
|
||||
project_id = entity['id']
|
||||
entity_type = 'show'
|
||||
|
||||
if entity.entity_type == 'User':
|
||||
entity_type = 'user'
|
||||
|
||||
if entity.entity_type == 'Asset':
|
||||
entity_type = 'asset'
|
||||
|
||||
if entity.entity_type in ('TypedContextList', 'AssetVersionList'):
|
||||
entity_type = 'list'
|
||||
|
||||
if entity_type is None:
|
||||
raise ValueError(
|
||||
'Entity {!r} not supported.'.format(entity)
|
||||
)
|
||||
|
||||
configurations = []
|
||||
for configuration in _get_custom_attribute_configurations(
|
||||
entity.session
|
||||
):
|
||||
if (
|
||||
configuration['entity_type'] == entity_type and
|
||||
configuration['project_id'] in (project_id, None) and
|
||||
configuration['object_type_id'] == object_type_id
|
||||
):
|
||||
# The custom attribute configuration is for the target entity type.
|
||||
configurations.append(configuration)
|
||||
elif (
|
||||
entity_type in ('asset', 'assetversion', 'show', 'task') and
|
||||
configuration['project_id'] in (project_id, None) and
|
||||
configuration['is_hierarchical']
|
||||
):
|
||||
# The target entity type allows hierarchical attributes.
|
||||
configurations.append(configuration)
|
||||
|
||||
# Return with global configurations at the end of the list. This is done
# so that global configurations are shadowed by project-specific ones if
# the configurations list is looped when looking for a matching `key`.
|
||||
return sorted(
|
||||
configurations, key=lambda item: item['project_id'] is None
|
||||
)
|
||||
|
||||
|
||||
class StandardFactory(Factory):
|
||||
'''Standard entity class factory.'''
|
||||
|
||||
def create(self, schema, bases=None):
|
||||
'''Create and return entity class from *schema*.'''
|
||||
if not bases:
|
||||
bases = []
|
||||
|
||||
extra_bases = []
|
||||
# Customise classes.
|
||||
if schema['id'] == 'ProjectSchema':
|
||||
extra_bases = [ftrack_api.entity.project_schema.ProjectSchema]
|
||||
|
||||
elif schema['id'] == 'Location':
|
||||
extra_bases = [ftrack_api.entity.location.Location]
|
||||
|
||||
elif schema['id'] == 'AssetVersion':
|
||||
extra_bases = [ftrack_api.entity.asset_version.AssetVersion]
|
||||
|
||||
elif schema['id'].endswith('Component'):
|
||||
extra_bases = [ftrack_api.entity.component.Component]
|
||||
|
||||
elif schema['id'] == 'Note':
|
||||
extra_bases = [ftrack_api.entity.note.Note]
|
||||
|
||||
elif schema['id'] == 'Job':
|
||||
extra_bases = [ftrack_api.entity.job.Job]
|
||||
|
||||
elif schema['id'] == 'User':
|
||||
extra_bases = [ftrack_api.entity.user.User]
|
||||
|
||||
bases = extra_bases + bases
|
||||
|
||||
# If bases does not contain any items, add the base entity class.
|
||||
if not bases:
|
||||
bases = [ftrack_api.entity.base.Entity]
|
||||
|
||||
# Add mixins.
|
||||
if 'notes' in schema.get('properties', {}):
|
||||
bases.append(
|
||||
ftrack_api.entity.note.CreateNoteMixin
|
||||
)
|
||||
|
||||
if 'thumbnail_id' in schema.get('properties', {}):
|
||||
bases.append(
|
||||
ftrack_api.entity.component.CreateThumbnailMixin
|
||||
)
|
||||
|
||||
cls = super(StandardFactory, self).create(schema, bases=bases)
|
||||
|
||||
return cls
|
||||
|
||||
def create_mapped_collection_attribute(
|
||||
self, class_name, name, mutable, reference
|
||||
):
|
||||
'''Return appropriate mapped collection attribute instance.'''
|
||||
if reference == 'Metadata':
|
||||
|
||||
def create_metadata(proxy, data, reference):
|
||||
'''Return metadata for *data*.'''
|
||||
entity = proxy.collection.entity
|
||||
session = entity.session
|
||||
data.update({
|
||||
'parent_id': entity['id'],
|
||||
'parent_type': entity.entity_type
|
||||
})
|
||||
return session.create(reference, data)
|
||||
|
||||
creator = functools.partial(
|
||||
create_metadata, reference=reference
|
||||
)
|
||||
key_attribute = 'key'
|
||||
value_attribute = 'value'
|
||||
|
||||
return ftrack_api.attribute.KeyValueMappedCollectionAttribute(
|
||||
name, creator, key_attribute, value_attribute, mutable=mutable
|
||||
)
|
||||
|
||||
elif reference == 'CustomAttributeValue':
|
||||
return (
|
||||
ftrack_api.attribute.CustomAttributeCollectionAttribute(
|
||||
name, mutable=mutable
|
||||
)
|
||||
)
|
||||
|
||||
elif reference.endswith('CustomAttributeValue'):
|
||||
def creator(proxy, data):
|
||||
'''Create a custom attribute based on *proxy* and *data*.
|
||||
|
||||
Raise :py:exc:`KeyError` if the related entity is already persisted
to the server. The proxy represents dense custom attribute
values and should never create new custom attribute values
through the proxy when the entity exists on the remote.

If the entity is not persisted, the usual
<entity_type>CustomAttributeValue items cannot be updated, as
the related entity does not exist on the remote and the values are
not in the proxy. Instead a <entity_type>CustomAttributeValue will
be reconstructed and an update operation will be recorded.
|
||||
|
||||
'''
|
||||
entity = proxy.collection.entity
|
||||
if (
|
||||
ftrack_api.inspection.state(entity) is not
|
||||
ftrack_api.symbol.CREATED
|
||||
):
|
||||
raise KeyError(
|
||||
'Custom attributes must be created explicitly for the '
|
||||
'given entity type before being set.'
|
||||
)
|
||||
|
||||
configuration = None
|
||||
for candidate in _get_entity_configurations(entity):
|
||||
if candidate['key'] == data['key']:
|
||||
configuration = candidate
|
||||
break
|
||||
|
||||
if configuration is None:
|
||||
raise ValueError(
|
||||
u'No valid custom attribute for data {0!r} was found.'
|
||||
.format(data)
|
||||
)
|
||||
|
||||
create_data = dict(list(data.items()))
|
||||
create_data['configuration_id'] = configuration['id']
|
||||
create_data['entity_id'] = entity['id']
|
||||
|
||||
session = entity.session
|
||||
|
||||
# Create custom attribute by reconstructing it and update the
|
||||
# value. This will prevent a create operation to be sent to the
|
||||
# remote, as create operations for this entity type is not
|
||||
# allowed. Instead an update operation will be recorded.
|
||||
value = create_data.pop('value')
|
||||
item = session.create(
|
||||
reference,
|
||||
create_data,
|
||||
reconstructing=True
|
||||
)
|
||||
|
||||
# Record update operation.
|
||||
item['value'] = value
|
||||
|
||||
return item
|
||||
|
||||
key_attribute = 'key'
|
||||
value_attribute = 'value'
|
||||
|
||||
return ftrack_api.attribute.KeyValueMappedCollectionAttribute(
|
||||
name, creator, key_attribute, value_attribute, mutable=mutable
|
||||
)
|
||||
|
||||
self.logger.debug(L(
|
||||
'Skipping {0}.{1} mapped_array attribute that has no configuration '
|
||||
'for reference {2}.', class_name, name, reference
|
||||
))
|
||||
48  pype/vendor/ftrack_api/entity/job.py  vendored  Normal file
@@ -0,0 +1,48 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class Job(ftrack_api.entity.base.Entity):
|
||||
'''Represent job.'''
|
||||
|
||||
def __init__(self, session, data=None, reconstructing=False):
|
||||
'''Initialise entity.
|
||||
|
||||
*session* is an instance of :class:`ftrack_api.session.Session` that
|
||||
this entity instance is bound to.
|
||||
|
||||
*data* is a mapping of key, value pairs to apply as initial attribute
|
||||
values.
|
||||
|
||||
To set a job `description` visible in the web interface, *data* can
|
||||
contain a key called `data` which should be a JSON serialised
|
||||
dictionary containing description::
|
||||
|
||||
data = {
|
||||
'status': 'running',
|
||||
'data': json.dumps(dict(description='My job description.')),
|
||||
...
|
||||
}
|
||||
|
||||
Will raise a :py:exc:`ValueError` if *data* contains `type` and `type`
|
||||
is set to something not equal to "api_job".
|
||||
|
||||
*reconstructing* indicates whether this entity is being reconstructed,
|
||||
such as from a query, and therefore should not have any special creation
|
||||
logic applied, such as initialising defaults for missing data.
|
||||
|
||||
'''
|
||||
|
||||
if not reconstructing:
|
||||
if data.get('type') not in ('api_job', None):
|
||||
raise ValueError(
|
||||
'Invalid job type "{0}". Must be "api_job"'.format(
|
||||
data.get('type')
|
||||
)
|
||||
)
|
||||
|
||||
super(Job, self).__init__(
|
||||
session, data=data, reconstructing=reconstructing
|
||||
)
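# Illustrative usage sketch, not part of the vendored source: creating an
# API job with a description visible in the web interface, following the
# docstring above. The user value is an assumption for illustration.
#
#     >>> import json
#     >>> job = session.create('Job', {
#     ...     'user': session.query('User').first(),
#     ...     'status': 'running',
#     ...     'data': json.dumps({'description': 'My job description.'})
#     ... })
#     >>> session.commit()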
|
||||
735  pype/vendor/ftrack_api/entity/location.py  vendored  Normal file
@@ -0,0 +1,735 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import zip
|
||||
from past.builtins import basestring
|
||||
from builtins import object
|
||||
import collections
|
||||
import functools
|
||||
|
||||
import ftrack_api.entity.base
|
||||
import ftrack_api.exception
|
||||
import ftrack_api.event.base
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.inspection
|
||||
from ftrack_api.logging import LazyLogMessage as L
|
||||
|
||||
|
||||
class Location(ftrack_api.entity.base.Entity):
|
||||
'''Represent storage for components.'''
|
||||
|
||||
def __init__(self, session, data=None, reconstructing=False):
|
||||
'''Initialise entity.
|
||||
|
||||
*session* is an instance of :class:`ftrack_api.session.Session` that
|
||||
this entity instance is bound to.
|
||||
|
||||
*data* is a mapping of key, value pairs to apply as initial attribute
|
||||
values.
|
||||
|
||||
*reconstructing* indicates whether this entity is being reconstructed,
|
||||
such as from a query, and therefore should not have any special creation
|
||||
logic applied, such as initialising defaults for missing data.
|
||||
|
||||
'''
|
||||
self.accessor = ftrack_api.symbol.NOT_SET
|
||||
self.structure = ftrack_api.symbol.NOT_SET
|
||||
self.resource_identifier_transformer = ftrack_api.symbol.NOT_SET
|
||||
self.priority = 95
|
||||
super(Location, self).__init__(
|
||||
session, data=data, reconstructing=reconstructing
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation of instance.'''
|
||||
representation = super(Location, self).__str__()
|
||||
|
||||
with self.session.auto_populating(False):
|
||||
name = self['name']
|
||||
if name is not ftrack_api.symbol.NOT_SET:
|
||||
representation = representation.replace(
|
||||
'(', '("{0}", '.format(name)
|
||||
)
|
||||
|
||||
return representation
|
||||
|
||||
def add_component(self, component, source, recursive=True):
|
||||
'''Add *component* to location.
|
||||
|
||||
*component* should be a single component instance.
|
||||
|
||||
*source* should be an instance of another location that acts as the
|
||||
source.
|
||||
|
||||
Raise :exc:`ftrack_api.ComponentInLocationError` if the *component*
|
||||
already exists in this location.
|
||||
|
||||
Raise :exc:`ftrack_api.LocationError` if managing data and the generated
|
||||
target structure for the component already exists according to the
|
||||
accessor. This helps prevent potential data loss by avoiding overwriting
|
||||
existing data. Note that there is a race condition between the check and
|
||||
the write so if another process creates data at the same target during
|
||||
that period it will be overwritten.
|
||||
|
||||
.. note::
|
||||
|
||||
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
|
||||
automatically issued as part of the component registration.
|
||||
|
||||
'''
|
||||
return self.add_components(
|
||||
[component], sources=source, recursive=recursive
|
||||
)
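# Illustrative usage sketch, not part of the vendored source: registering a
# component in a target location, transferring the data from the origin
# location. The location name and file path are assumptions.
#
#     >>> origin = session.get('Location', ftrack_api.symbol.ORIGIN_LOCATION_ID)
#     >>> target = session.query('Location where name is "studio.disk"').one()
#     >>> component = session.create_component('/tmp/render.exr', location=None)
#     >>> target.add_component(component, origin)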
|
||||
|
||||
def add_components(self, components, sources, recursive=True, _depth=0):
|
||||
'''Add *components* to location.
|
||||
|
||||
*components* should be a list of component instances.
|
||||
|
||||
*sources* may be either a single source or a list of sources. If a list
|
||||
then each corresponding index in *sources* will be used for each
|
||||
*component*. A source should be an instance of another location.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ComponentInLocationError` if any
|
||||
component in *components* already exists in this location. In this case,
|
||||
no changes will be made and no data transferred.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.LocationError` if managing data and the
|
||||
generated target structure for the component already exists according to
|
||||
the accessor. This helps prevent potential data loss by avoiding
|
||||
overwriting existing data. Note that there is a race condition between
|
||||
the check and the write so if another process creates data at the same
|
||||
target during that period it will be overwritten.
|
||||
|
||||
.. note::
|
||||
|
||||
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
|
||||
automatically issued as part of the components registration.
|
||||
|
||||
.. important::
|
||||
|
||||
If this location manages data then the *components* data is first
|
||||
transferred to the target prescribed by the structure plugin, using
|
||||
the configured accessor. If any component fails to transfer then
|
||||
:exc:`ftrack_api.exception.LocationError` is raised and none of the
|
||||
components are registered with the database. In this case it is left
|
||||
up to the caller to decide and act on manually cleaning up any
|
||||
transferred data using the 'transferred' detail in the raised error.
|
||||
|
||||
Likewise, after transfer, all components are registered with the
|
||||
database in a batch call. If any component causes an error then all
|
||||
components will remain unregistered and
|
||||
:exc:`ftrack_api.exception.LocationError` will be raised detailing
|
||||
issues and any transferred data under the 'transferred' detail key.
|
||||
|
||||
'''
|
||||
if (
|
||||
isinstance(sources, basestring)
|
||||
or not isinstance(sources, collections.Sequence)
|
||||
):
|
||||
sources = [sources]
|
||||
|
||||
sources_count = len(sources)
|
||||
if sources_count not in (1, len(components)):
|
||||
raise ValueError(
|
||||
'sources must be either a single source or a sequence of '
|
||||
'sources with indexes corresponding to passed components.'
|
||||
)
|
||||
|
||||
if not self.structure:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'No structure defined for location {location}.',
|
||||
details=dict(location=self)
|
||||
)
|
||||
|
||||
if not components:
|
||||
# Optimisation: Return early when no components to process, such as
|
||||
# when called recursively on an empty sequence component.
|
||||
return
|
||||
|
||||
indent = ' ' * (_depth + 1)
|
||||
|
||||
# Check that components not already added to location.
|
||||
existing_components = []
|
||||
try:
|
||||
self.get_resource_identifiers(components)
|
||||
|
||||
except ftrack_api.exception.ComponentNotInLocationError as error:
|
||||
missing_component_ids = [
|
||||
missing_component['id']
|
||||
for missing_component in error.details['components']
|
||||
]
|
||||
for component in components:
|
||||
if component['id'] not in missing_component_ids:
|
||||
existing_components.append(component)
|
||||
|
||||
else:
|
||||
existing_components.extend(components)
|
||||
|
||||
if existing_components:
|
||||
# Some of the components already present in location.
|
||||
raise ftrack_api.exception.ComponentInLocationError(
|
||||
existing_components, self
|
||||
)
|
||||
|
||||
# Attempt to transfer each component's data to this location.
|
||||
transferred = []
|
||||
|
||||
for index, component in enumerate(components):
|
||||
try:
|
||||
# Determine appropriate source.
|
||||
if sources_count == 1:
|
||||
source = sources[0]
|
||||
else:
|
||||
source = sources[index]
|
||||
|
||||
# Add members first for container components.
|
||||
is_container = 'members' in list(component.keys())
|
||||
if is_container and recursive:
|
||||
self.add_components(
|
||||
component['members'], source, recursive=recursive,
|
||||
_depth=(_depth + 1)
|
||||
)
|
||||
|
||||
# Add component to this location.
|
||||
context = self._get_context(component, source)
|
||||
resource_identifier = self.structure.get_resource_identifier(
|
||||
component, context
|
||||
)
|
||||
|
||||
# Manage data transfer.
|
||||
self._add_data(component, resource_identifier, source)
|
||||
|
||||
except Exception as error:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'Failed to transfer component {component} data to location '
|
||||
'{location} due to error:\n{indent}{error}\n{indent}'
|
||||
'Transferred component data that may require cleanup: '
|
||||
'{transferred}',
|
||||
details=dict(
|
||||
indent=indent,
|
||||
component=component,
|
||||
location=self,
|
||||
error=error,
|
||||
transferred=transferred
|
||||
)
|
||||
)
|
||||
|
||||
else:
|
||||
transferred.append((component, resource_identifier))
|
||||
|
||||
# Register all successfully transferred components.
|
||||
components_to_register = []
|
||||
component_resource_identifiers = []
|
||||
|
||||
try:
|
||||
for component, resource_identifier in transferred:
|
||||
if self.resource_identifier_transformer:
|
||||
# Optionally encode resource identifier before storing.
|
||||
resource_identifier = (
|
||||
self.resource_identifier_transformer.encode(
|
||||
resource_identifier,
|
||||
context={'component': component}
|
||||
)
|
||||
)
|
||||
|
||||
components_to_register.append(component)
|
||||
component_resource_identifiers.append(resource_identifier)
|
||||
|
||||
# Store component in location information.
|
||||
self._register_components_in_location(
|
||||
components, component_resource_identifiers
|
||||
)
|
||||
|
||||
except Exception as error:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'Failed to register components with location {location} due to '
|
||||
'error:\n{indent}{error}\n{indent}Transferred component data '
|
||||
'that may require cleanup: {transferred}',
|
||||
details=dict(
|
||||
indent=indent,
|
||||
location=self,
|
||||
error=error,
|
||||
transferred=transferred
|
||||
)
|
||||
)
|
||||
|
||||
# Publish events.
|
||||
for component in components_to_register:
|
||||
|
||||
component_id = list(ftrack_api.inspection.primary_key(
|
||||
component
|
||||
).values())[0]
|
||||
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
|
||||
|
||||
self.session.event_hub.publish(
|
||||
ftrack_api.event.base.Event(
|
||||
topic=ftrack_api.symbol.COMPONENT_ADDED_TO_LOCATION_TOPIC,
|
||||
data=dict(
|
||||
component_id=component_id,
|
||||
location_id=location_id
|
||||
),
|
||||
),
|
||||
on_error='ignore'
|
||||
)
|
||||
|
||||
def _get_context(self, component, source):
|
||||
'''Return context for *component* and *source*.'''
|
||||
context = {}
|
||||
if source:
|
||||
try:
|
||||
source_resource_identifier = source.get_resource_identifier(
|
||||
component
|
||||
)
|
||||
except ftrack_api.exception.ComponentNotInLocationError:
|
||||
pass
|
||||
else:
|
||||
context.update(dict(
|
||||
source_resource_identifier=source_resource_identifier
|
||||
))
|
||||
|
||||
return context
|
||||
|
||||
def _add_data(self, component, resource_identifier, source):
|
||||
'''Manage transfer of *component* data from *source*.
|
||||
|
||||
*resource_identifier* specifies the identifier to use with this
location's accessor.
|
||||
|
||||
'''
|
||||
self.logger.debug(L(
|
||||
'Adding data for component {0!r} from source {1!r} to location '
|
||||
'{2!r} using resource identifier {3!r}.',
|
||||
component, resource_identifier, source, self
|
||||
))
|
||||
|
||||
# Read data from source and write to this location.
|
||||
if not source.accessor:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'No accessor defined for source location {location}.',
|
||||
details=dict(location=source)
|
||||
)
|
||||
|
||||
if not self.accessor:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'No accessor defined for target location {location}.',
|
||||
details=dict(location=self)
|
||||
)
|
||||
|
||||
is_container = 'members' in list(component.keys())
|
||||
if is_container:
|
||||
# TODO: Improve this check. Possibly introduce an inspection
|
||||
# such as ftrack_api.inspection.is_sequence_component.
|
||||
if component.entity_type != 'SequenceComponent':
|
||||
self.accessor.make_container(resource_identifier)
|
||||
|
||||
else:
|
||||
# Try to make container of component.
|
||||
try:
|
||||
container = self.accessor.get_container(
|
||||
resource_identifier
|
||||
)
|
||||
|
||||
except ftrack_api.exception.AccessorParentResourceNotFoundError:
|
||||
# Container could not be retrieved from
|
||||
# resource_identifier. Assume that there is no need to
|
||||
# make the container.
|
||||
pass
|
||||
|
||||
else:
|
||||
# No need for existence check as make_container does not
|
||||
# recreate existing containers.
|
||||
self.accessor.make_container(container)
|
||||
|
||||
if self.accessor.exists(resource_identifier):
|
||||
# Note: There is a race condition here in that the
|
||||
# data may be added externally between the check for
|
||||
# existence and the actual write which would still
|
||||
# result in potential data loss. However, there is no
|
||||
# good cross platform, cross accessor solution for this
|
||||
# at present.
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'Cannot add component as data already exists and '
|
||||
'overwriting could result in data loss. Computed '
|
||||
'target resource identifier was: {0}'
|
||||
.format(resource_identifier)
|
||||
)
|
||||
|
||||
# Read and write data.
|
||||
source_data = source.accessor.open(
|
||||
source.get_resource_identifier(component), 'rb'
|
||||
)
|
||||
target_data = self.accessor.open(resource_identifier, 'wb')
|
||||
|
||||
# Read/write data in chunks to avoid reading all into memory at the
|
||||
# same time.
|
||||
chunked_read = functools.partial(
|
||||
source_data.read, ftrack_api.symbol.CHUNK_SIZE
|
||||
)
|
||||
for chunk in iter(chunked_read, b''):
|
||||
target_data.write(chunk)
|
||||
|
||||
target_data.close()
|
||||
source_data.close()
|
||||
|
||||
def _register_component_in_location(self, component, resource_identifier):
|
||||
'''Register *component* in location against *resource_identifier*.'''
|
||||
return self._register_components_in_location(
|
||||
[component], [resource_identifier]
|
||||
)
|
||||
|
||||
def _register_components_in_location(
|
||||
self, components, resource_identifiers
|
||||
):
|
||||
'''Register *components* in location against *resource_identifiers*.
|
||||
|
||||
Indices of *components* and *resource_identifiers* should align.
|
||||
|
||||
'''
|
||||
for component, resource_identifier in zip(
|
||||
components, resource_identifiers
|
||||
):
|
||||
self.session.create(
|
||||
'ComponentLocation', data=dict(
|
||||
component=component,
|
||||
location=self,
|
||||
resource_identifier=resource_identifier
|
||||
)
|
||||
)
|
||||
|
||||
self.session.commit()
|
||||
|
||||
def remove_component(self, component, recursive=True):
|
||||
'''Remove *component* from location.
|
||||
|
||||
.. note::
|
||||
|
||||
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
|
||||
automatically issued as part of the component deregistration.
|
||||
|
||||
'''
|
||||
return self.remove_components([component], recursive=recursive)
|
||||
|
||||
def remove_components(self, components, recursive=True):
|
||||
'''Remove *components* from location.
|
||||
|
||||
.. note::
|
||||
|
||||
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
|
||||
automatically issued as part of the components deregistration.
|
||||
|
||||
'''
|
||||
for component in components:
|
||||
# Check component is in this location
|
||||
self.get_resource_identifier(component)
|
||||
|
||||
# Remove members first for container components.
|
||||
is_container = 'members' in list(component.keys())
|
||||
if is_container and recursive:
|
||||
self.remove_components(
|
||||
component['members'], recursive=recursive
|
||||
)
|
||||
|
||||
# Remove data.
|
||||
self._remove_data(component)
|
||||
|
||||
# Remove metadata.
|
||||
self._deregister_component_in_location(component)
|
||||
|
||||
# Emit event.
|
||||
component_id = list(ftrack_api.inspection.primary_key(
|
||||
component
|
||||
).values())[0]
|
||||
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
|
||||
self.session.event_hub.publish(
|
||||
ftrack_api.event.base.Event(
|
||||
topic=ftrack_api.symbol.COMPONENT_REMOVED_FROM_LOCATION_TOPIC,
|
||||
data=dict(
|
||||
component_id=component_id,
|
||||
location_id=location_id
|
||||
)
|
||||
),
|
||||
on_error='ignore'
|
||||
)
|
||||
|
||||
def _remove_data(self, component):
|
||||
'''Remove data associated with *component*.'''
|
||||
if not self.accessor:
|
||||
raise ftrack_api.exception.LocationError(
|
||||
'No accessor defined for location {location}.',
|
||||
details=dict(location=self)
|
||||
)
|
||||
|
||||
try:
|
||||
self.accessor.remove(
|
||||
self.get_resource_identifier(component)
|
||||
)
|
||||
except ftrack_api.exception.AccessorResourceNotFoundError:
|
||||
# If accessor does not support detecting sequence paths then an
|
||||
# AccessorResourceNotFoundError is raised. For now, if the
|
||||
# component type is 'SequenceComponent' assume success.
|
||||
if not component.entity_type == 'SequenceComponent':
|
||||
raise
|
||||
|
||||
def _deregister_component_in_location(self, component):
|
||||
'''Deregister *component* from location.'''
|
||||
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
|
||||
location_id = list(ftrack_api.inspection.primary_key(self).values())[0]
|
||||
|
||||
# TODO: Use session.get for optimisation.
|
||||
component_location = self.session.query(
|
||||
'ComponentLocation where component_id is {0} and location_id is '
|
||||
'{1}'.format(component_id, location_id)
|
||||
)[0]
|
||||
|
||||
self.session.delete(component_location)
|
||||
|
||||
# TODO: Should auto-commit here be optional?
|
||||
self.session.commit()
|
||||
|
||||
def get_component_availability(self, component):
|
||||
'''Return availability of *component* in this location as a float.'''
|
||||
return self.session.get_component_availability(
|
||||
component, locations=[self]
|
||||
)[self['id']]
|
||||
|
||||
def get_component_availabilities(self, components):
|
||||
'''Return availabilities of *components* in this location.
|
||||
|
||||
Return list of float values corresponding to each component.
|
||||
|
||||
'''
|
||||
return [
|
||||
availability[self['id']] for availability in
|
||||
self.session.get_component_availabilities(
|
||||
components, locations=[self]
|
||||
)
|
||||
]
|
||||
|
||||
def get_resource_identifier(self, component):
|
||||
'''Return resource identifier for *component*.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if the
|
||||
component is not present in this location.
|
||||
|
||||
'''
|
||||
return self.get_resource_identifiers([component])[0]
|
||||
|
||||
def get_resource_identifiers(self, components):
|
||||
'''Return resource identifiers for *components*.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
|
||||
of the components are not present in this location.
|
||||
|
||||
'''
|
||||
resource_identifiers = self._get_resource_identifiers(components)
|
||||
|
||||
# Optionally decode resource identifier.
|
||||
if self.resource_identifier_transformer:
|
||||
for index, resource_identifier in enumerate(resource_identifiers):
|
||||
resource_identifiers[index] = (
|
||||
self.resource_identifier_transformer.decode(
|
||||
resource_identifier
|
||||
)
|
||||
)
|
||||
|
||||
return resource_identifiers
|
||||
|
||||
def _get_resource_identifiers(self, components):
|
||||
'''Return resource identifiers for *components*.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
|
||||
of the components are not present in this location.
|
||||
|
||||
'''
|
||||
component_ids_mapping = collections.OrderedDict()
|
||||
for component in components:
|
||||
component_id = list(ftrack_api.inspection.primary_key(
|
||||
component
|
||||
).values())[0]
|
||||
component_ids_mapping[component_id] = component
|
||||
|
||||
component_locations = self.session.query(
|
||||
'select component_id, resource_identifier from ComponentLocation '
|
||||
'where location_id is {0} and component_id in ({1})'
|
||||
.format(
|
||||
list(ftrack_api.inspection.primary_key(self).values())[0],
|
||||
', '.join(list(component_ids_mapping.keys()))
|
||||
)
|
||||
)
|
||||
|
||||
resource_identifiers_map = {}
|
||||
for component_location in component_locations:
|
||||
resource_identifiers_map[component_location['component_id']] = (
|
||||
component_location['resource_identifier']
|
||||
)
|
||||
|
||||
resource_identifiers = []
|
||||
missing = []
|
||||
for component_id, component in list(component_ids_mapping.items()):
|
||||
if component_id not in resource_identifiers_map:
|
||||
missing.append(component)
|
||||
else:
|
||||
resource_identifiers.append(
|
||||
resource_identifiers_map[component_id]
|
||||
)
|
||||
|
||||
if missing:
|
||||
raise ftrack_api.exception.ComponentNotInLocationError(
|
||||
missing, self
|
||||
)
|
||||
|
||||
return resource_identifiers
|
||||
|
||||
def get_filesystem_path(self, component):
|
||||
'''Return filesystem path for *component*.'''
|
||||
return self.get_filesystem_paths([component])[0]
|
||||
|
||||
def get_filesystem_paths(self, components):
|
||||
'''Return filesystem paths for *components*.'''
|
||||
resource_identifiers = self.get_resource_identifiers(components)
|
||||
|
||||
filesystem_paths = []
|
||||
for resource_identifier in resource_identifiers:
|
||||
filesystem_paths.append(
|
||||
self.accessor.get_filesystem_path(resource_identifier)
|
||||
)
|
||||
|
||||
return filesystem_paths
|
||||
|
||||
def get_url(self, component):
|
||||
'''Return url for *component*.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
|
||||
URL could not be determined from *component* or
|
||||
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
|
||||
retrieving URL is not supported by the location's accessor.
|
||||
'''
|
||||
resource_identifier = self.get_resource_identifier(component)
|
||||
|
||||
return self.accessor.get_url(resource_identifier)
|
||||
|
||||
|
||||
class MemoryLocationMixin(object):
|
||||
'''Represent storage for components.
|
||||
|
||||
Unlike a standard location, only store metadata for components in this
|
||||
location in memory rather than persisting to the database.
|
||||
|
||||
'''
|
||||
|
||||
@property
|
||||
def _cache(self):
|
||||
'''Return cache.'''
|
||||
try:
|
||||
cache = self.__cache
|
||||
except AttributeError:
|
||||
cache = self.__cache = {}
|
||||
|
||||
return cache
|
||||
|
||||
def _register_component_in_location(self, component, resource_identifier):
|
||||
'''Register *component* in location with *resource_identifier*.'''
|
||||
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
|
||||
self._cache[component_id] = resource_identifier
|
||||
|
||||
def _register_components_in_location(
|
||||
self, components, resource_identifiers
|
||||
):
|
||||
'''Register *components* in location against *resource_identifiers*.
|
||||
|
||||
Indices of *components* and *resource_identifiers* should align.
|
||||
|
||||
'''
|
||||
for component, resource_identifier in zip(
|
||||
components, resource_identifiers
|
||||
):
|
||||
self._register_component_in_location(component, resource_identifier)
|
||||
|
||||
def _deregister_component_in_location(self, component):
|
||||
'''Deregister *component* in location.'''
|
||||
component_id = list(ftrack_api.inspection.primary_key(component).values())[0]
|
||||
self._cache.pop(component_id)
|
||||
|
||||
def _get_resource_identifiers(self, components):
|
||||
'''Return resource identifiers for *components*.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ComponentNotInLocationError` if any
|
||||
of the referenced components are not present in this location.
|
||||
|
||||
'''
|
||||
resource_identifiers = []
|
||||
missing = []
|
||||
for component in components:
|
||||
component_id = list(ftrack_api.inspection.primary_key(
|
||||
component
|
||||
).values())[0]
|
||||
resource_identifier = self._cache.get(component_id)
|
||||
if resource_identifier is None:
|
||||
missing.append(component)
|
||||
else:
|
||||
resource_identifiers.append(resource_identifier)
|
||||
|
||||
if missing:
|
||||
raise ftrack_api.exception.ComponentNotInLocationError(
|
||||
missing, self
|
||||
)
|
||||
|
||||
return resource_identifiers
|
||||
|
||||
|
||||
class UnmanagedLocationMixin(object):
|
||||
'''Location that does not manage data.'''
|
||||
|
||||
def _add_data(self, component, resource_identifier, source):
|
||||
'''Manage transfer of *component* data from *source*.
|
||||
|
||||
*resource_identifier* specifies the identifier to use with this
location's accessor.
|
||||
|
||||
Overridden to have no effect.
|
||||
|
||||
'''
|
||||
return
|
||||
|
||||
def _remove_data(self, component):
|
||||
'''Remove data associated with *component*.
|
||||
|
||||
Overridden to have no effect.
|
||||
|
||||
'''
|
||||
return
|
||||
|
||||
|
||||
class OriginLocationMixin(MemoryLocationMixin, UnmanagedLocationMixin):
|
||||
'''Special origin location that expects sources as filepaths.'''
|
||||
|
||||
def _get_context(self, component, source):
|
||||
'''Return context for *component* and *source*.'''
|
||||
context = {}
|
||||
if source:
|
||||
context.update(dict(
|
||||
source_resource_identifier=source
|
||||
))
|
||||
|
||||
return context
|
||||
|
||||
|
||||
class ServerLocationMixin(object):
|
||||
'''Location representing ftrack server.
|
||||
|
||||
Adds convenience methods to location, specific to ftrack server.
|
||||
'''
|
||||
def get_thumbnail_url(self, component, size=None):
|
||||
'''Return thumbnail url for *component*.
|
||||
|
||||
Optionally, specify *size* to constrain the downscaled image to size
|
||||
x size pixels.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.AccessorFilesystemPathError` if
|
||||
URL could not be determined from *resource_identifier* or
|
||||
:exc:`~ftrack_api.exception.AccessorUnsupportedOperationError` if
|
||||
retrieving URL is not supported by the location's accessor.
|
||||
'''
|
||||
resource_identifier = self.get_resource_identifier(component)
|
||||
return self.accessor.get_thumbnail_url(resource_identifier, size)
|
||||
69  pype/vendor/ftrack_api/entity/note.py  vendored  Normal file
@@ -0,0 +1,69 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import object
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class Note(ftrack_api.entity.base.Entity):
|
||||
'''Represent a note.'''
|
||||
|
||||
def create_reply(
|
||||
self, content, author
|
||||
):
|
||||
'''Create a reply with *content* and *author*.
|
||||
|
||||
.. note::
|
||||
|
||||
This is a helper method. To create replies manually use the
|
||||
standard :meth:`Session.create` method.
|
||||
|
||||
'''
|
||||
reply = self.session.create(
|
||||
'Note', {
|
||||
'author': author,
|
||||
'content': content
|
||||
}
|
||||
)
|
||||
|
||||
self['replies'].append(reply)
|
||||
|
||||
return reply
|
||||
|
||||
|
||||
class CreateNoteMixin(object):
|
||||
'''Mixin to add create_note method on entity class.'''
|
||||
|
||||
def create_note(self, content, author, recipients=None, category=None):
|
||||
'''Create a note with *content* and *author*.

The note category can be set by passing *category*, and *recipients*
can be specified as a list of user or group instances.
|
||||
|
||||
'''
|
||||
if not recipients:
|
||||
recipients = []
|
||||
|
||||
category_id = None
|
||||
if category:
|
||||
category_id = category['id']
|
||||
|
||||
data = {
|
||||
'content': content,
|
||||
'author': author,
|
||||
'category_id': category_id
|
||||
}
|
||||
|
||||
note = self.session.create('Note', data)
|
||||
|
||||
self['notes'].append(note)
|
||||
|
||||
for resource in recipients:
|
||||
recipient = self.session.create('Recipient', {
|
||||
'note_id': note['id'],
|
||||
'resource_id': resource['id']
|
||||
})
|
||||
|
||||
note['recipients'].append(recipient)
|
||||
|
||||
return note
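# Illustrative usage sketch, not part of the vendored source: the mixin is
# added to entity classes exposing ``notes``, so a note can be attached
# directly. The query values are assumptions for illustration.
#
#     >>> author = session.query('User').first()
#     >>> task = session.query('Task').first()
#     >>> note = task.create_note('Please review.', author=author)
#     >>> session.commit()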
|
||||
94  pype/vendor/ftrack_api/entity/project_schema.py  vendored  Normal file
@@ -0,0 +1,94 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class ProjectSchema(ftrack_api.entity.base.Entity):
|
||||
'''Class representing ProjectSchema.'''
|
||||
|
||||
def get_statuses(self, schema, type_id=None):
|
||||
'''Return statuses for *schema* and optional *type_id*.
|
||||
|
||||
*type_id* is the id of the Type for a TypedContext and can be used to
|
||||
get statuses where the workflow has been overridden.
|
||||
|
||||
'''
|
||||
# Task has overrides and need to be handled separately.
|
||||
if schema == 'Task':
|
||||
if type_id is not None:
|
||||
overrides = self['_overrides']
|
||||
for override in overrides:
|
||||
if override['type_id'] == type_id:
|
||||
return override['workflow_schema']['statuses'][:]
|
||||
|
||||
return self['_task_workflow']['statuses'][:]
|
||||
|
||||
elif schema == 'AssetVersion':
|
||||
return self['_version_workflow']['statuses'][:]
|
||||
|
||||
else:
|
||||
try:
|
||||
EntityTypeClass = self.session.types[schema]
|
||||
except KeyError:
|
||||
raise ValueError('Schema {0} does not exist.'.format(schema))
|
||||
|
||||
object_type_id_attribute = EntityTypeClass.attributes.get(
|
||||
'object_type_id'
|
||||
)
|
||||
|
||||
try:
|
||||
object_type_id = object_type_id_attribute.default_value
|
||||
except AttributeError:
|
||||
raise ValueError(
|
||||
'Schema {0} does not have statuses.'.format(schema)
|
||||
)
|
||||
|
||||
for _schema in self['_schemas']:
|
||||
if _schema['type_id'] == object_type_id:
|
||||
result = self.session.query(
|
||||
'select task_status from SchemaStatus '
|
||||
'where schema_id is {0}'.format(_schema['id'])
|
||||
)
|
||||
return [
|
||||
schema_type['task_status'] for schema_type in result
|
||||
]
|
||||
|
||||
raise ValueError(
|
||||
'No valid statuses were found for schema {0}.'.format(schema)
|
||||
)
|
||||
|
||||
def get_types(self, schema):
|
||||
'''Return types for *schema*.'''
|
||||
# Task need to be handled separately.
|
||||
if schema == 'Task':
|
||||
return self['_task_type_schema']['types'][:]
|
||||
|
||||
else:
|
||||
try:
|
||||
EntityTypeClass = self.session.types[schema]
|
||||
except KeyError:
|
||||
raise ValueError('Schema {0} does not exist.'.format(schema))
|
||||
|
||||
object_type_id_attribute = EntityTypeClass.attributes.get(
|
||||
'object_type_id'
|
||||
)
|
||||
|
||||
try:
|
||||
object_type_id = object_type_id_attribute.default_value
|
||||
except AttributeError:
|
||||
raise ValueError(
|
||||
'Schema {0} does not have types.'.format(schema)
|
||||
)
|
||||
|
||||
for _schema in self['_schemas']:
|
||||
if _schema['type_id'] == object_type_id:
|
||||
result = self.session.query(
|
||||
'select task_type from SchemaType '
|
||||
'where schema_id is {0}'.format(_schema['id'])
|
||||
)
|
||||
return [schema_type['task_type'] for schema_type in result]
|
||||
|
||||
raise ValueError(
|
||||
'No valid types were found for schema {0}.'.format(schema)
|
||||
)
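# Illustrative usage sketch, not part of the vendored source: looking up
# the workflow statuses and types a project schema allows for tasks. The
# query is an assumption for illustration.
#
#     >>> project = session.query('Project').first()
#     >>> project_schema = project['project_schema']
#     >>> statuses = project_schema.get_statuses('Task')
#     >>> types = project_schema.get_types('Task')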
|
||||
124  pype/vendor/ftrack_api/entity/user.py  vendored  Normal file
@@ -0,0 +1,124 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import str
|
||||
import arrow
|
||||
|
||||
import ftrack_api.entity.base
|
||||
import ftrack_api.exception
|
||||
|
||||
|
||||
class User(ftrack_api.entity.base.Entity):
|
||||
'''Represent a user.'''
|
||||
|
||||
def start_timer(self, context=None, comment='', name=None, force=False):
|
||||
'''Start a timer for *context* and return it.
|
||||
|
||||
*force* can be used to automatically stop an existing timer and create a
|
||||
timelog for it. If you need to get access to the created timelog, use
|
||||
:func:`stop_timer` instead.
|
||||
|
||||
*comment* and *name* are optional but will be set on the timer.
|
||||
|
||||
.. note::
|
||||
|
||||
This method will automatically commit the changes and if *force* is
|
||||
False then it will fail with a
|
||||
:class:`ftrack_api.exception.NotUniqueError` exception if a
|
||||
timer is already running.
|
||||
|
||||
'''
|
||||
if force:
|
||||
try:
|
||||
self.stop_timer()
|
||||
except ftrack_api.exception.NoResultFoundError:
|
||||
self.logger.debug('Failed to stop existing timer.')
|
||||
|
||||
timer = self.session.create('Timer', {
|
||||
'user': self,
|
||||
'context': context,
|
||||
'name': name,
|
||||
'comment': comment
|
||||
})
|
||||
|
||||
# Commit the new timer and try to catch any error that indicate another
|
||||
# timelog already exists and inform the user about it.
|
||||
try:
|
||||
self.session.commit()
|
||||
except ftrack_api.exception.ServerError as error:
|
||||
if 'IntegrityError' in str(error):
|
||||
raise ftrack_api.exception.NotUniqueError(
|
||||
('Failed to start a timelog for user with id: {0}, it is '
|
||||
'likely that a timer is already running. Either use '
|
||||
'force=True or stop the timer first.').format(self['id'])
|
||||
)
|
||||
else:
|
||||
# Reraise the error as it might be something unrelated.
|
||||
raise
|
||||
|
||||
return timer
|
||||
|
||||
def stop_timer(self):
|
||||
'''Stop the current timer and return a timelog created from it.
|
||||
|
||||
If a timer is not running, a
|
||||
:exc:`ftrack_api.exception.NoResultFoundError` exception will be
|
||||
raised.
|
||||
|
||||
.. note::
|
||||
|
||||
This method will automatically commit the changes.
|
||||
|
||||
'''
|
||||
timer = self.session.query(
|
||||
'Timer where user_id = "{0}"'.format(self['id'])
|
||||
).one()
|
||||
|
||||
# If the server is running in the same timezone as the local
|
||||
# timezone, we remove the TZ offset to get the correct duration.
|
||||
is_timezone_support_enabled = self.session.server_information.get(
|
||||
'is_timezone_support_enabled', None
|
||||
)
|
||||
if is_timezone_support_enabled is None:
|
||||
self.logger.warning(
|
||||
'Could not identify if server has timezone support enabled. '
|
||||
'Will assume server is running in UTC.'
|
||||
)
|
||||
is_timezone_support_enabled = True
|
||||
|
||||
if is_timezone_support_enabled:
|
||||
now = arrow.now()
|
||||
else:
|
||||
now = arrow.now().replace(tzinfo='utc')
|
||||
|
||||
delta = now - timer['start']
|
||||
duration = delta.days * 24 * 60 * 60 + delta.seconds
|
||||
|
||||
timelog = self.session.create('Timelog', {
|
||||
'user_id': timer['user_id'],
|
||||
'context_id': timer['context_id'],
|
||||
'comment': timer['comment'],
|
||||
'start': timer['start'],
|
||||
'duration': duration,
|
||||
'name': timer['name']
|
||||
})
|
||||
|
||||
self.session.delete(timer)
|
||||
self.session.commit()
|
||||
|
||||
return timelog
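# Illustrative usage sketch, not part of the vendored source: starting a
# timer on a task and turning it into a timelog. The query values are
# assumptions for illustration.
#
#     >>> user = session.query('User').first()
#     >>> task = session.query('Task').first()
#     >>> user.start_timer(task, force=True)
#     >>> timelog = user.stop_timer()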
|
||||
|
||||
def send_invite(self):
|
||||
'''Send an invitation email to the user.'''
|
||||
|
||||
self.session.send_user_invite(
|
||||
self
|
||||
)
|
||||
def reset_api_key(self):
|
||||
'''Reset the user's API key.'''
|
||||
|
||||
response = self.session.reset_remote(
|
||||
'api_key', entity=self
|
||||
)
|
||||
|
||||
return response['api_key']
|
||||
2  pype/vendor/ftrack_api/event/__init__.py  vendored  Normal file
@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
86  pype/vendor/ftrack_api/event/base.py  vendored  Normal file
@@ -0,0 +1,86 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from builtins import str
|
||||
import uuid
|
||||
import collections
|
||||
|
||||
|
||||
class Event(collections.MutableMapping):
|
||||
'''Represent a single event.'''
|
||||
|
||||
def __init__(self, topic, id=None, data=None, sent=None,
|
||||
source=None, target='', in_reply_to_event=None):
|
||||
'''Initialise event.
|
||||
|
||||
*topic* is the required topic for the event. It can use a dotted
|
||||
notation to demarcate groupings. For example, 'ftrack.update'.
|
||||
|
||||
*id* is the unique id for this event instance. It is primarily used when
|
||||
replying to an event. If not supplied a default uuid based value will
|
||||
be used.
|
||||
|
||||
*data* refers to event specific data. It should be a mapping structure
|
||||
and defaults to an empty dictionary if not supplied.
|
||||
|
||||
*sent* is the timestamp the event is sent. It will be set automatically
|
||||
as send time unless specified here.
|
||||
|
||||
*source* is information about where the event originated. It should be
|
||||
a mapping and include at least a unique id value under an 'id' key. If
|
||||
not specified, senders usually populate the value automatically at
|
||||
publish time.
|
||||
|
||||
*target* can be an expression that targets this event. For example,
|
||||
a reply event would target the event to the sender of the source event.
|
||||
The expression will be tested against subscriber information only.
|
||||
|
||||
*in_reply_to_event* is used when replying to an event and should contain
|
||||
the unique id of the event being replied to.
|
||||
|
||||
'''
|
||||
super(Event, self).__init__()
|
||||
self._data = dict(
|
||||
id=id or uuid.uuid4().hex,
|
||||
data=data or {},
|
||||
topic=topic,
|
||||
sent=sent,
|
||||
source=source or {},
|
||||
target=target,
|
||||
in_reply_to_event=in_reply_to_event
|
||||
)
|
||||
self._stopped = False
|
||||
|
||||
def stop(self):
|
||||
'''Stop further processing of this event.'''
|
||||
self._stopped = True
|
||||
|
||||
def is_stopped(self):
|
||||
'''Return whether event has been stopped.'''
|
||||
return self._stopped
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0} {1}>'.format(
|
||||
self.__class__.__name__, str(self._data)
|
||||
)
|
||||
|
||||
def __getitem__(self, key):
|
||||
'''Return value for *key*.'''
|
||||
return self._data[key]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
'''Set *value* for *key*.'''
|
||||
self._data[key] = value
|
||||
|
||||
def __delitem__(self, key):
|
||||
'''Remove *key*.'''
|
||||
del self._data[key]
|
||||
|
||||
def __iter__(self):
|
||||
'''Iterate over all keys.'''
|
||||
return iter(self._data)
|
||||
|
||||
def __len__(self):
|
||||
'''Return count of keys.'''
|
||||
return len(self._data)
|
||||
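A minimal sketch of the Event mapping used on its own; the topic and payload values are illustrative.

    from ftrack_api.event.base import Event

    event = Event(topic='ftrack.update', data={'entities': []})

    print(event['topic'])       # 'ftrack.update'
    print(event.is_stopped())   # False

    event.stop()                # a handler can halt further processing
    print(event.is_stopped())   # True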
pype/vendor/ftrack_api/event/expression.py (vendored, new file, 285 lines)
@@ -0,0 +1,285 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from builtins import map
|
||||
from past.builtins import basestring
|
||||
from builtins import object
|
||||
from operator import eq, ne, ge, le, gt, lt
|
||||
|
||||
from pyparsing import (Group, Word, CaselessKeyword, Forward,
|
||||
FollowedBy, Suppress, oneOf, OneOrMore, Optional,
|
||||
alphanums, quotedString, removeQuotes)
|
||||
|
||||
import ftrack_api.exception
|
||||
|
||||
# Do not enable packrat since it is not thread-safe and will result in parsing
|
||||
# exceptions in a multi threaded environment.
|
||||
# ParserElement.enablePackrat()
|
||||
|
||||
|
||||
class Parser(object):
|
||||
'''Parse string based expression into :class:`Expression` instance.'''
|
||||
|
||||
def __init__(self):
|
||||
'''Initialise parser.'''
|
||||
self._operators = {
|
||||
'=': eq,
|
||||
'!=': ne,
|
||||
'>=': ge,
|
||||
'<=': le,
|
||||
'>': gt,
|
||||
'<': lt
|
||||
}
|
||||
self._parser = self._construct_parser()
|
||||
super(Parser, self).__init__()
|
||||
|
||||
def _construct_parser(self):
|
||||
'''Construct and return parser.'''
|
||||
field = Word(alphanums + '_.')
|
||||
operator = oneOf(list(self._operators.keys()))
|
||||
value = Word(alphanums + '-_,./*@+')
|
||||
quoted_value = quotedString('quoted_value').setParseAction(removeQuotes)
|
||||
|
||||
condition = Group(
|
||||
field + operator + (quoted_value | value)
|
||||
)('condition')
|
||||
|
||||
not_ = Optional(Suppress(CaselessKeyword('not')))('not')
|
||||
and_ = Suppress(CaselessKeyword('and'))('and')
|
||||
or_ = Suppress(CaselessKeyword('or'))('or')
|
||||
|
||||
expression = Forward()
|
||||
parenthesis = Suppress('(') + expression + Suppress(')')
|
||||
previous = condition | parenthesis
|
||||
|
||||
for conjunction in (not_, and_, or_):
|
||||
current = Forward()
|
||||
|
||||
if conjunction in (and_, or_):
|
||||
conjunction_expression = (
|
||||
FollowedBy(previous + conjunction + previous)
|
||||
+ Group(
|
||||
previous + OneOrMore(conjunction + previous)
|
||||
)(conjunction.resultsName)
|
||||
)
|
||||
|
||||
elif conjunction in (not_, ):
|
||||
conjunction_expression = (
|
||||
FollowedBy(conjunction.expr + current)
|
||||
+ Group(conjunction + current)(conjunction.resultsName)
|
||||
)
|
||||
|
||||
else: # pragma: no cover
|
||||
raise ValueError('Unrecognised conjunction.')
|
||||
|
||||
current <<= (conjunction_expression | previous)
|
||||
previous = current
|
||||
|
||||
expression <<= previous
|
||||
return expression('expression')
|
||||
|
||||
def parse(self, expression):
|
||||
'''Parse string *expression* into :class:`Expression`.
|
||||
|
||||
Raise :exc:`ftrack_api.exception.ParseError` if *expression* could
|
||||
not be parsed.
|
||||
|
||||
'''
|
||||
result = None
|
||||
expression = expression.strip()
|
||||
if expression:
|
||||
try:
|
||||
result = self._parser.parseString(
|
||||
expression, parseAll=True
|
||||
)
|
||||
except Exception as error:
|
||||
raise ftrack_api.exception.ParseError(
|
||||
'Failed to parse: {0}. {1}'.format(expression, error)
|
||||
)
|
||||
|
||||
return self._process(result)
|
||||
|
||||
def _process(self, result):
|
||||
'''Process *result* using appropriate method.
|
||||
|
||||
Method called is determined by the name of the result.
|
||||
|
||||
'''
|
||||
method_name = '_process_{0}'.format(result.getName())
|
||||
method = getattr(self, method_name)
|
||||
return method(result)
|
||||
|
||||
def _process_expression(self, result):
|
||||
'''Process *result* as expression.'''
|
||||
return self._process(result[0])
|
||||
|
||||
def _process_not(self, result):
|
||||
'''Process *result* as NOT operation.'''
|
||||
return Not(self._process(result[0]))
|
||||
|
||||
def _process_and(self, result):
|
||||
'''Process *result* as AND operation.'''
|
||||
return All([self._process(entry) for entry in result])
|
||||
|
||||
def _process_or(self, result):
|
||||
'''Process *result* as OR operation.'''
|
||||
return Any([self._process(entry) for entry in result])
|
||||
|
||||
def _process_condition(self, result):
|
||||
'''Process *result* as condition.'''
|
||||
key, operator, value = result
|
||||
return Condition(key, self._operators[operator], value)
|
||||
|
||||
def _process_quoted_value(self, result):
|
||||
'''Process *result* as quoted value.'''
|
||||
return result
|
||||
|
||||
|
||||
class Expression(object):
|
||||
'''Represent a structured expression to test candidates against.'''
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0}>'.format(self.__class__.__name__)
|
||||
|
||||
def match(self, candidate):
|
||||
'''Return whether *candidate* satisfies this expression.'''
|
||||
return True
|
||||
|
||||
|
||||
class All(Expression):
|
||||
'''Match candidate that matches all of the specified expressions.
|
||||
|
||||
.. note::
|
||||
|
||||
If no expressions are supplied then will always match.
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, expressions=None):
|
||||
'''Initialise with list of *expressions* to match against.'''
|
||||
self._expressions = expressions or []
|
||||
super(All, self).__init__()
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0} [{1}]>'.format(
|
||||
self.__class__.__name__,
|
||||
' '.join(map(str, self._expressions))
|
||||
)
|
||||
|
||||
def match(self, candidate):
|
||||
'''Return whether *candidate* satisfies this expression.'''
|
||||
return all([
|
||||
expression.match(candidate) for expression in self._expressions
|
||||
])
|
||||
|
||||
|
||||
class Any(Expression):
|
||||
'''Match candidate that matches any of the specified expressions.
|
||||
|
||||
.. note::
|
||||
|
||||
If no expressions are supplied then will never match.
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, expressions=None):
|
||||
'''Initialise with list of *expressions* to match against.'''
|
||||
self._expressions = expressions or []
|
||||
super(Any, self).__init__()
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0} [{1}]>'.format(
|
||||
self.__class__.__name__,
|
||||
' '.join(map(str, self._expressions))
|
||||
)
|
||||
|
||||
def match(self, candidate):
|
||||
'''Return whether *candidate* satisfies this expression.'''
|
||||
return any([
|
||||
expression.match(candidate) for expression in self._expressions
|
||||
])
|
||||
|
||||
|
||||
class Not(Expression):
|
||||
'''Negate expression.'''
|
||||
|
||||
def __init__(self, expression):
|
||||
'''Initialise with *expression* to negate.'''
|
||||
self._expression = expression
|
||||
super(Not, self).__init__()
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0} {1}>'.format(
|
||||
self.__class__.__name__,
|
||||
self._expression
|
||||
)
|
||||
|
||||
def match(self, candidate):
|
||||
'''Return whether *candidate* satisfies this expression.'''
|
||||
return not self._expression.match(candidate)
|
||||
|
||||
|
||||
class Condition(Expression):
|
||||
'''Represent condition.'''
|
||||
|
||||
def __init__(self, key, operator, value):
|
||||
'''Initialise condition.
|
||||
|
||||
*key* is the key to check on the data when matching. It can be a nested
|
||||
key represented by dots. For example, 'data.eventType' would attempt to
|
||||
match candidate['data']['eventType']. If the candidate is missing any
|
||||
of the requested keys then the match fails immediately.
|
||||
|
||||
*operator* is the operator function to use to perform the match between
|
||||
the retrieved candidate value and the conditional *value*.
|
||||
|
||||
If *value* is a string, it can use a wildcard '*' at the end to denote
|
||||
that any values matching the substring portion are valid when matching
|
||||
equality only.
|
||||
|
||||
'''
|
||||
self._key = key
|
||||
self._operator = operator
|
||||
self._value = value
|
||||
self._wildcard = '*'
|
||||
self._operatorMapping = {
|
||||
eq: '=',
|
||||
ne: '!=',
|
||||
ge: '>=',
|
||||
le: '<=',
|
||||
gt: '>',
|
||||
lt: '<'
|
||||
}
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
return '<{0} {1}{2}{3}>'.format(
|
||||
self.__class__.__name__,
|
||||
self._key,
|
||||
self._operatorMapping.get(self._operator, self._operator),
|
||||
self._value
|
||||
)
|
||||
|
||||
def match(self, candidate):
|
||||
'''Return whether *candidate* satisfies this expression.'''
|
||||
key_parts = self._key.split('.')
|
||||
|
||||
try:
|
||||
value = candidate
|
||||
for keyPart in key_parts:
|
||||
value = value[keyPart]
|
||||
except (KeyError, TypeError):
|
||||
return False
|
||||
|
||||
if (
|
||||
self._operator is eq
|
||||
and isinstance(self._value, basestring)
|
||||
and self._value[-1] == self._wildcard
|
||||
):
|
||||
return self._value[:-1] in value
|
||||
else:
|
||||
return self._operator(value, self._value)
|
||||
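A small sketch of the expression parser above used in isolation; the expression and the candidate mapping are illustrative, and pyparsing must be importable.

    import ftrack_api.event.expression

    parser = ftrack_api.event.expression.Parser()
    expression = parser.parse('topic=ftrack.update and data.entityType=task')

    candidate = {
        'topic': 'ftrack.update',
        'data': {'entityType': 'task'}
    }
    print(expression.match(candidate))  # True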
pype/vendor/ftrack_api/event/hub.py (vendored, new file, 1085 lines)
File diff suppressed because it is too large.
pype/vendor/ftrack_api/event/subscriber.py (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

from builtins import object
import ftrack_api.event.subscription


class Subscriber(object):
    '''Represent event subscriber.'''

    def __init__(self, subscription, callback, metadata, priority):
        '''Initialise subscriber.'''
        self.subscription = ftrack_api.event.subscription.Subscription(
            subscription
        )
        self.callback = callback
        self.metadata = metadata
        self.priority = priority

    def __str__(self):
        '''Return string representation.'''
        return '<{0} metadata={1} subscription="{2}">'.format(
            self.__class__.__name__, self.metadata, self.subscription
        )

    def interested_in(self, event):
        '''Return whether subscriber interested in *event*.'''
        return self.subscription.includes(event)
pype/vendor/ftrack_api/event/subscription.py (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

from builtins import object
import ftrack_api.event.expression


class Subscription(object):
    '''Represent a subscription.'''

    parser = ftrack_api.event.expression.Parser()

    def __init__(self, subscription):
        '''Initialise with *subscription*.'''
        self._subscription = subscription
        self._expression = self.parser.parse(subscription)

    def __str__(self):
        '''Return string representation.'''
        return self._subscription

    def includes(self, event):
        '''Return whether subscription includes *event*.'''
        return self._expression.match(event)
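A minimal sketch tying Subscription and Subscriber together; the callback, metadata and subscription string are placeholders.

    import ftrack_api.event.subscriber

    def callback(event):
        print('Received:', event['topic'])

    subscriber = ftrack_api.event.subscriber.Subscriber(
        subscription='topic=ftrack.update',
        callback=callback,
        metadata={'id': 'example-subscriber'},
        priority=100
    )

    # interested_in() evaluates the parsed subscription against an event.
    print(subscriber.interested_in({'topic': 'ftrack.update'}))  # True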
pype/vendor/ftrack_api/exception.py (vendored, new file, 393 lines)
@@ -0,0 +1,393 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from builtins import str
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import ftrack_api.entity.base
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
'''ftrack specific error.'''
|
||||
|
||||
default_message = 'Unspecified error occurred.'
|
||||
|
||||
def __init__(self, message=None, details=None):
|
||||
'''Initialise exception with *message*.
|
||||
|
||||
If *message* is None, the class 'default_message' will be used.
|
||||
|
||||
*details* should be a mapping of extra information that can be used in
|
||||
the message and also to provide more context.
|
||||
|
||||
'''
|
||||
if message is None:
|
||||
message = self.default_message
|
||||
|
||||
self.message = message
|
||||
self.details = details
|
||||
if self.details is None:
|
||||
self.details = {}
|
||||
|
||||
self.traceback = traceback.format_exc()
|
||||
|
||||
def __str__(self):
|
||||
'''Return string representation.'''
|
||||
keys = {}
|
||||
for key, value in self.details.items():
|
||||
if isinstance(value, str):
|
||||
value = value.encode(sys.getfilesystemencoding())
|
||||
keys[key] = value
|
||||
|
||||
return str(self.message.format(**keys))
|
||||
|
||||
|
||||
class AuthenticationError(Error):
|
||||
'''Raise when an authentication error occurs.'''
|
||||
|
||||
default_message = 'Authentication error.'
|
||||
|
||||
|
||||
class ServerError(Error):
|
||||
'''Raise when the server reports an error.'''
|
||||
|
||||
default_message = 'Server reported error processing request.'
|
||||
|
||||
|
||||
class ServerCompatibilityError(ServerError):
|
||||
'''Raise when server appears incompatible.'''
|
||||
|
||||
default_message = 'Server incompatible.'
|
||||
|
||||
|
||||
class NotFoundError(Error):
|
||||
'''Raise when something that should exist is not found.'''
|
||||
|
||||
default_message = 'Not found.'
|
||||
|
||||
|
||||
class NotUniqueError(Error):
|
||||
'''Raise when unique value required and duplicate detected.'''
|
||||
|
||||
default_message = 'Non-unique value detected.'
|
||||
|
||||
|
||||
class IncorrectResultError(Error):
|
||||
'''Raise when a result is incorrect.'''
|
||||
|
||||
default_message = 'Incorrect result detected.'
|
||||
|
||||
|
||||
class NoResultFoundError(IncorrectResultError):
|
||||
'''Raise when a result was expected but no result was found.'''
|
||||
|
||||
default_message = 'Expected result, but no result was found.'
|
||||
|
||||
|
||||
class MultipleResultsFoundError(IncorrectResultError):
|
||||
'''Raise when a single result expected, but multiple results found.'''
|
||||
|
||||
default_message = 'Expected single result, but received multiple results.'
|
||||
|
||||
|
||||
class EntityTypeError(Error):
|
||||
'''Raise when an entity type error occurs.'''
|
||||
|
||||
default_message = 'Entity type error.'
|
||||
|
||||
|
||||
class UnrecognisedEntityTypeError(EntityTypeError):
|
||||
'''Raise when an unrecognised entity type detected.'''
|
||||
|
||||
default_message = 'Entity type "{entity_type}" not recognised.'
|
||||
|
||||
def __init__(self, entity_type, **kw):
|
||||
'''Initialise with *entity_type* that is unrecognised.'''
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
entity_type=entity_type
|
||||
))
|
||||
super(UnrecognisedEntityTypeError, self).__init__(**kw)
|
||||
|
||||
|
||||
class OperationError(Error):
|
||||
'''Raise when an operation error occurs.'''
|
||||
|
||||
default_message = 'Operation error.'
|
||||
|
||||
|
||||
class InvalidStateError(Error):
|
||||
'''Raise when an invalid state detected.'''
|
||||
|
||||
default_message = 'Invalid state.'
|
||||
|
||||
|
||||
class InvalidStateTransitionError(InvalidStateError):
|
||||
'''Raise when an invalid state transition detected.'''
|
||||
|
||||
default_message = (
|
||||
'Invalid transition from {current_state!r} to {target_state!r} state '
|
||||
'for entity {entity!r}'
|
||||
)
|
||||
|
||||
def __init__(self, current_state, target_state, entity, **kw):
|
||||
'''Initialise error.'''
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
current_state=current_state,
|
||||
target_state=target_state,
|
||||
entity=entity
|
||||
))
|
||||
super(InvalidStateTransitionError, self).__init__(**kw)
|
||||
|
||||
|
||||
class AttributeError(Error):
|
||||
'''Raise when an error related to an attribute occurs.'''
|
||||
|
||||
default_message = 'Attribute error.'
|
||||
|
||||
|
||||
class ImmutableAttributeError(AttributeError):
|
||||
'''Raise when modification of immutable attribute attempted.'''
|
||||
|
||||
default_message = (
|
||||
'Cannot modify value of immutable {attribute.name!r} attribute.'
|
||||
)
|
||||
|
||||
def __init__(self, attribute, **kw):
|
||||
'''Initialise error.'''
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
attribute=attribute
|
||||
))
|
||||
super(ImmutableAttributeError, self).__init__(**kw)
|
||||
|
||||
|
||||
class CollectionError(Error):
|
||||
'''Raise when an error related to collections occurs.'''
|
||||
|
||||
default_message = 'Collection error.'
|
||||
|
||||
def __init__(self, collection, **kw):
|
||||
'''Initialise error.'''
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
collection=collection
|
||||
))
|
||||
super(CollectionError, self).__init__(**kw)
|
||||
|
||||
|
||||
class ImmutableCollectionError(CollectionError):
|
||||
'''Raise when modification of immutable collection attempted.'''
|
||||
|
||||
default_message = (
|
||||
'Cannot modify value of immutable collection {collection!r}.'
|
||||
)
|
||||
|
||||
|
||||
class DuplicateItemInCollectionError(CollectionError):
|
||||
'''Raise when duplicate item in collection detected.'''
|
||||
|
||||
default_message = (
|
||||
'Item {item!r} already exists in collection {collection!r}.'
|
||||
)
|
||||
|
||||
def __init__(self, item, collection, **kw):
|
||||
'''Initialise error.'''
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
item=item
|
||||
))
|
||||
super(DuplicateItemInCollectionError, self).__init__(collection, **kw)
|
||||
|
||||
|
||||
class ParseError(Error):
|
||||
'''Raise when a parsing error occurs.'''
|
||||
|
||||
default_message = 'Failed to parse.'
|
||||
|
||||
|
||||
class EventHubError(Error):
|
||||
'''Raise when issues related to event hub occur.'''
|
||||
|
||||
default_message = 'Event hub error occurred.'
|
||||
|
||||
|
||||
class EventHubConnectionError(EventHubError):
|
||||
'''Raise when event hub encounters connection problem.'''
|
||||
|
||||
default_message = 'Event hub is not connected.'
|
||||
|
||||
|
||||
class EventHubPacketError(EventHubError):
|
||||
'''Raise when event hub encounters an issue with a packet.'''
|
||||
|
||||
default_message = 'Invalid packet.'
|
||||
|
||||
|
||||
class PermissionDeniedError(Error):
|
||||
'''Raise when permission is denied.'''
|
||||
|
||||
default_message = 'Permission denied.'
|
||||
|
||||
|
||||
class LocationError(Error):
|
||||
'''Base for errors associated with locations.'''
|
||||
|
||||
default_message = 'Unspecified location error'
|
||||
|
||||
|
||||
class ComponentNotInAnyLocationError(LocationError):
|
||||
'''Raise when component not available in any location.'''
|
||||
|
||||
default_message = 'Component not available in any location.'
|
||||
|
||||
|
||||
class ComponentNotInLocationError(LocationError):
|
||||
'''Raise when component(s) not in location.'''
|
||||
|
||||
default_message = (
|
||||
'Component(s) {formatted_components} not found in location {location}.'
|
||||
)
|
||||
|
||||
def __init__(self, components, location, **kw):
|
||||
'''Initialise with *components* and *location*.'''
|
||||
if isinstance(components, ftrack_api.entity.base.Entity):
|
||||
components = [components]
|
||||
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
components=components,
|
||||
formatted_components=', '.join(
|
||||
[str(component) for component in components]
|
||||
),
|
||||
location=location
|
||||
))
|
||||
|
||||
super(ComponentNotInLocationError, self).__init__(**kw)
|
||||
|
||||
|
||||
class ComponentInLocationError(LocationError):
|
||||
'''Raise when component(s) already exists in location.'''
|
||||
|
||||
default_message = (
|
||||
'Component(s) {formatted_components} already exist in location '
|
||||
'{location}.'
|
||||
)
|
||||
|
||||
def __init__(self, components, location, **kw):
|
||||
'''Initialise with *components* and *location*.'''
|
||||
if isinstance(components, ftrack_api.entity.base.Entity):
|
||||
components = [components]
|
||||
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
components=components,
|
||||
formatted_components=', '.join(
|
||||
[str(component) for component in components]
|
||||
),
|
||||
location=location
|
||||
))
|
||||
|
||||
super(ComponentInLocationError, self).__init__(**kw)
|
||||
|
||||
|
||||
class AccessorError(Error):
|
||||
'''Base for errors associated with accessors.'''
|
||||
|
||||
default_message = 'Unspecified accessor error'
|
||||
|
||||
|
||||
class AccessorOperationFailedError(AccessorError):
|
||||
'''Base for failed operations on accessors.'''
|
||||
|
||||
default_message = 'Operation {operation} failed: {error}'
|
||||
|
||||
def __init__(
|
||||
self, operation='', resource_identifier=None, error=None, **kw
|
||||
):
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
operation=operation,
|
||||
resource_identifier=resource_identifier,
|
||||
error=error
|
||||
))
|
||||
super(AccessorOperationFailedError, self).__init__(**kw)
|
||||
|
||||
|
||||
class AccessorUnsupportedOperationError(AccessorOperationFailedError):
|
||||
'''Raise when operation is unsupported.'''
|
||||
|
||||
default_message = 'Operation {operation} unsupported.'
|
||||
|
||||
|
||||
class AccessorPermissionDeniedError(AccessorOperationFailedError):
|
||||
'''Raise when permission denied.'''
|
||||
|
||||
default_message = (
|
||||
'Cannot {operation} {resource_identifier}. Permission denied.'
|
||||
)
|
||||
|
||||
|
||||
class AccessorResourceIdentifierError(AccessorError):
|
||||
'''Raise when a error related to a resource_identifier occurs.'''
|
||||
|
||||
default_message = 'Resource identifier is invalid: {resource_identifier}.'
|
||||
|
||||
def __init__(self, resource_identifier, **kw):
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
resource_identifier=resource_identifier
|
||||
))
|
||||
super(AccessorResourceIdentifierError, self).__init__(**kw)
|
||||
|
||||
|
||||
class AccessorFilesystemPathError(AccessorResourceIdentifierError):
|
||||
'''Raise when a error related to an accessor filesystem path occurs.'''
|
||||
|
||||
default_message = (
|
||||
'Could not determine filesystem path from resource identifier: '
|
||||
'{resource_identifier}.'
|
||||
)
|
||||
|
||||
|
||||
class AccessorResourceError(AccessorError):
|
||||
'''Base for errors associated with specific resource.'''
|
||||
|
||||
default_message = 'Unspecified resource error: {resource_identifier}'
|
||||
|
||||
def __init__(self, operation='', resource_identifier=None, error=None,
|
||||
**kw):
|
||||
kw.setdefault('details', {}).update(dict(
|
||||
operation=operation,
|
||||
resource_identifier=resource_identifier
|
||||
))
|
||||
super(AccessorResourceError, self).__init__(**kw)
|
||||
|
||||
|
||||
class AccessorResourceNotFoundError(AccessorResourceError):
|
||||
'''Raise when a required resource is not found.'''
|
||||
|
||||
default_message = 'Resource not found: {resource_identifier}'
|
||||
|
||||
|
||||
class AccessorParentResourceNotFoundError(AccessorResourceError):
|
||||
'''Raise when a parent resource (such as directory) is not found.'''
|
||||
|
||||
default_message = 'Parent resource is missing: {resource_identifier}'
|
||||
|
||||
|
||||
class AccessorResourceInvalidError(AccessorResourceError):
|
||||
'''Raise when a resource is not the right type.'''
|
||||
|
||||
default_message = 'Resource invalid: {resource_identifier}'
|
||||
|
||||
|
||||
class AccessorContainerNotEmptyError(AccessorResourceError):
|
||||
'''Raise when container is not empty.'''
|
||||
|
||||
default_message = 'Container is not empty: {resource_identifier}'
|
||||
|
||||
|
||||
class StructureError(Error):
|
||||
'''Base for errors associated with structures.'''
|
||||
|
||||
default_message = 'Unspecified structure error'
|
||||
|
||||
|
||||
class ConnectionClosedError(Error):
|
||||
'''Raise when attempt to use closed connection detected.'''
|
||||
|
||||
default_message = "Connection closed."
|
||||
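A short sketch of how these exceptions behave; the entity type passed in is illustrative.

    import ftrack_api.exception

    try:
        raise ftrack_api.exception.NotFoundError()
    except ftrack_api.exception.Error as error:
        # default_message is used when no explicit message is supplied.
        print(str(error))  # Not found.

    # Subclasses record extra context in *details* for message formatting.
    error = ftrack_api.exception.UnrecognisedEntityTypeError('Task')
    print(error.details['entity_type'])  # Task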
pype/vendor/ftrack_api/formatter.py (vendored, new file, 132 lines)
@@ -0,0 +1,132 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from builtins import str
|
||||
import termcolor
|
||||
|
||||
import ftrack_api.entity.base
|
||||
import ftrack_api.collection
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.inspection
|
||||
|
||||
|
||||
#: Useful filters to pass to :func:`format`.`
|
||||
FILTER = {
|
||||
'ignore_unset': (
|
||||
lambda entity, name, value: value is not ftrack_api.symbol.NOT_SET
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
def format(
|
||||
entity, formatters=None, attribute_filter=None, recursive=False,
|
||||
indent=0, indent_first_line=True, _seen=None
|
||||
):
|
||||
'''Return formatted string representing *entity*.
|
||||
|
||||
*formatters* can be used to customise formatting of elements. It should be a
|
||||
mapping with one or more of the following keys:
|
||||
|
||||
* header - Used to format entity type.
|
||||
* label - Used to format attribute names.
|
||||
|
||||
Specify an *attribute_filter* to control which attributes to include. By
|
||||
default all attributes are included. The *attribute_filter* should be a
|
||||
callable that accepts `(entity, attribute_name, attribute_value)` and
|
||||
returns True if the attribute should be included in the output. For example,
|
||||
to filter out all unset values::
|
||||
|
||||
attribute_filter=ftrack_api.formatter.FILTER['ignore_unset']
|
||||
|
||||
If *recursive* is True then recurse into Collections and format each entity
|
||||
present.
|
||||
|
||||
*indent* specifies the overall indentation in spaces of the formatted text,
|
||||
whilst *indent_first_line* determines whether to apply that indent to the
|
||||
first generated line.
|
||||
|
||||
.. warning::
|
||||
|
||||
Iterates over all *entity* attributes which may cause multiple queries
|
||||
to the server. Turn off auto populating in the session to prevent this.
|
||||
|
||||
'''
|
||||
# Initialise default formatters.
|
||||
if formatters is None:
|
||||
formatters = dict()
|
||||
|
||||
formatters.setdefault(
|
||||
'header', lambda text: termcolor.colored(
|
||||
text, 'white', 'on_blue', attrs=['bold']
|
||||
)
|
||||
)
|
||||
formatters.setdefault(
|
||||
'label', lambda text: termcolor.colored(
|
||||
text, 'blue', attrs=['bold']
|
||||
)
|
||||
)
|
||||
|
||||
# Determine indents.
|
||||
spacer = ' ' * indent
|
||||
if indent_first_line:
|
||||
first_line_spacer = spacer
|
||||
else:
|
||||
first_line_spacer = ''
|
||||
|
||||
# Avoid infinite recursion on circular references.
|
||||
if _seen is None:
|
||||
_seen = set()
|
||||
|
||||
identifier = str(ftrack_api.inspection.identity(entity))
|
||||
if identifier in _seen:
|
||||
return (
|
||||
first_line_spacer +
|
||||
formatters['header'](entity.entity_type) + '{...}'
|
||||
)
|
||||
|
||||
_seen.add(identifier)
|
||||
information = list()
|
||||
|
||||
information.append(
|
||||
first_line_spacer + formatters['header'](entity.entity_type)
|
||||
)
|
||||
for key, value in sorted(entity.items()):
|
||||
if attribute_filter is not None:
|
||||
if not attribute_filter(entity, key, value):
|
||||
continue
|
||||
|
||||
child_indent = indent + len(key) + 3
|
||||
|
||||
if isinstance(value, ftrack_api.entity.base.Entity):
|
||||
value = format(
|
||||
value,
|
||||
formatters=formatters,
|
||||
attribute_filter=attribute_filter,
|
||||
recursive=recursive,
|
||||
indent=child_indent,
|
||||
indent_first_line=False,
|
||||
_seen=_seen.copy()
|
||||
)
|
||||
|
||||
if isinstance(value, ftrack_api.collection.Collection):
|
||||
if recursive:
|
||||
child_values = []
|
||||
for index, child in enumerate(value):
|
||||
child_value = format(
|
||||
child,
|
||||
formatters=formatters,
|
||||
attribute_filter=attribute_filter,
|
||||
recursive=recursive,
|
||||
indent=child_indent,
|
||||
indent_first_line=index != 0,
|
||||
_seen=_seen.copy()
|
||||
)
|
||||
child_values.append(child_value)
|
||||
|
||||
value = '\n'.join(child_values)
|
||||
|
||||
information.append(
|
||||
spacer + u' {0}: {1}'.format(formatters['label'](key), value)
|
||||
)
|
||||
|
||||
return '\n'.join(information)
|
||||
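A usage sketch for the formatter, assuming a reachable ftrack server; the query expression is illustrative.

    import ftrack_api
    import ftrack_api.formatter

    session = ftrack_api.Session()
    task = session.query('Task').first()

    # Print the task, skipping attributes that are still NOT_SET.
    print(ftrack_api.formatter.format(
        task,
        attribute_filter=ftrack_api.formatter.FILTER['ignore_unset']
    ))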
pype/vendor/ftrack_api/inspection.py (vendored, new file, 141 lines)
@@ -0,0 +1,141 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import str
|
||||
from future.utils import native_str
|
||||
import collections
|
||||
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.operation
|
||||
|
||||
|
||||
def identity(entity):
|
||||
'''Return unique identity of *entity*.'''
|
||||
return (
|
||||
str(entity.entity_type),
|
||||
list(primary_key(entity).values())
|
||||
)
|
||||
|
||||
|
||||
def primary_key(entity):
|
||||
'''Return primary key of *entity* as an ordered mapping of {field: value}.
|
||||
|
||||
To get just the primary key values::
|
||||
|
||||
primary_key(entity).values()
|
||||
|
||||
'''
|
||||
primary_key = collections.OrderedDict()
|
||||
for name in entity.primary_key_attributes:
|
||||
value = entity[name]
|
||||
if value is ftrack_api.symbol.NOT_SET:
|
||||
raise KeyError(
|
||||
'Missing required value for primary key attribute "{0}" on '
|
||||
'entity {1!r}.'.format(name, entity)
|
||||
)
|
||||
|
||||
|
||||
# This is something I am not happy about.
|
||||
# COMPAT!
|
||||
|
||||
primary_key[native_str(name)] = native_str(value)
|
||||
|
||||
return primary_key
|
||||
|
||||
|
||||
def _state(operation, state):
|
||||
'''Return state following *operation* against current *state*.'''
|
||||
if (
|
||||
isinstance(
|
||||
operation, ftrack_api.operation.CreateEntityOperation
|
||||
)
|
||||
and state is ftrack_api.symbol.NOT_SET
|
||||
):
|
||||
state = ftrack_api.symbol.CREATED
|
||||
|
||||
elif (
|
||||
isinstance(
|
||||
operation, ftrack_api.operation.UpdateEntityOperation
|
||||
)
|
||||
and state is ftrack_api.symbol.NOT_SET
|
||||
):
|
||||
state = ftrack_api.symbol.MODIFIED
|
||||
|
||||
elif isinstance(
|
||||
operation, ftrack_api.operation.DeleteEntityOperation
|
||||
):
|
||||
state = ftrack_api.symbol.DELETED
|
||||
|
||||
return state
|
||||
|
||||
|
||||
def state(entity):
|
||||
'''Return current *entity* state.
|
||||
|
||||
.. seealso:: :func:`ftrack_api.inspection.states`.
|
||||
|
||||
'''
|
||||
value = ftrack_api.symbol.NOT_SET
|
||||
|
||||
for operation in entity.session.recorded_operations:
|
||||
# Determine if operation refers to an entity and whether that entity
|
||||
# is *entity*.
|
||||
if (
|
||||
isinstance(
|
||||
operation,
|
||||
(
|
||||
ftrack_api.operation.CreateEntityOperation,
|
||||
ftrack_api.operation.UpdateEntityOperation,
|
||||
ftrack_api.operation.DeleteEntityOperation
|
||||
)
|
||||
)
|
||||
and operation.entity_type == entity.entity_type
|
||||
and operation.entity_key == primary_key(entity)
|
||||
):
|
||||
value = _state(operation, value)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def states(entities):
|
||||
'''Return current states of *entities*.
|
||||
|
||||
An optimised function for determining states of multiple entities in one
|
||||
go.
|
||||
|
||||
.. note::
|
||||
|
||||
All *entities* should belong to the same session.
|
||||
|
||||
.. seealso:: :func:`ftrack_api.inspection.state`.
|
||||
|
||||
'''
|
||||
if not entities:
|
||||
return []
|
||||
|
||||
session = entities[0].session
|
||||
|
||||
entities_by_identity = collections.OrderedDict()
|
||||
for entity in entities:
|
||||
key = (entity.entity_type, str(list(primary_key(entity).values())))
|
||||
entities_by_identity[key] = ftrack_api.symbol.NOT_SET
|
||||
|
||||
for operation in session.recorded_operations:
|
||||
if (
|
||||
isinstance(
|
||||
operation,
|
||||
(
|
||||
ftrack_api.operation.CreateEntityOperation,
|
||||
ftrack_api.operation.UpdateEntityOperation,
|
||||
ftrack_api.operation.DeleteEntityOperation
|
||||
)
|
||||
)
|
||||
):
|
||||
key = (operation.entity_type, str(list(operation.entity_key.values())))
|
||||
if key not in entities_by_identity:
|
||||
continue
|
||||
|
||||
value = _state(operation, entities_by_identity[key])
|
||||
entities_by_identity[key] = value
|
||||
|
||||
return list(entities_by_identity.values())
|
||||
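A sketch of the inspection helpers, assuming a reachable ftrack server; the attribute change below is illustrative.

    import ftrack_api
    import ftrack_api.inspection

    session = ftrack_api.Session()
    user = session.query('User').first()

    # Ordered {field: value} mapping of the primary key.
    print(ftrack_api.inspection.primary_key(user))

    # After a local change an UpdateEntityOperation is recorded, so the
    # inspected state becomes the MODIFIED symbol.
    user['email'] = 'new.address@example.com'
    print(ftrack_api.inspection.state(user))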
pype/vendor/ftrack_api/logging.py (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
# :coding: utf-8
# :copyright: Copyright (c) 2016 ftrack

from builtins import object


class LazyLogMessage(object):
    '''A log message that can be evaluated lazily for improved performance.

    Example::

        # Formatting of string will not occur unless debug logging enabled.
        logger.debug(LazyLogMessage(
            'Hello {0}', 'world'
        ))

    '''

    def __init__(self, message, *args, **kwargs):
        '''Initialise with *message* format string and arguments.'''
        self.message = message
        self.args = args
        self.kwargs = kwargs

    def __str__(self):
        '''Return string representation.'''
        return self.message.format(*self.args, **self.kwargs)
pype/vendor/ftrack_api/operation.py (vendored, new file, 116 lines)
@@ -0,0 +1,116 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack

from builtins import object
import copy


class Operations(object):
    '''Stack of operations.'''

    def __init__(self):
        '''Initialise stack.'''
        self._stack = []
        super(Operations, self).__init__()

    def clear(self):
        '''Clear all operations.'''
        del self._stack[:]

    def push(self, operation):
        '''Push *operation* onto stack.'''
        self._stack.append(operation)

    def pop(self):
        '''Pop and return most recent operation from stack.'''
        return self._stack.pop()

    def __len__(self):
        '''Return count of operations.'''
        return len(self._stack)

    def __iter__(self):
        '''Return iterator over operations.'''
        return iter(self._stack)


class Operation(object):
    '''Represent an operation.'''


class CreateEntityOperation(Operation):
    '''Represent create entity operation.'''

    def __init__(self, entity_type, entity_key, entity_data):
        '''Initialise operation.

        *entity_type* should be the type of entity in string form (as returned
        from :attr:`ftrack_api.entity.base.Entity.entity_type`).

        *entity_key* should be the unique key for the entity and should follow
        the form returned from :func:`ftrack_api.inspection.primary_key`.

        *entity_data* should be a mapping of the initial data to populate the
        entity with when creating.

        .. note::

            Shallow copies will be made of each value in *entity_data*.

        '''
        super(CreateEntityOperation, self).__init__()
        self.entity_type = entity_type
        self.entity_key = entity_key
        self.entity_data = {}
        for key, value in list(entity_data.items()):
            self.entity_data[key] = copy.copy(value)


class UpdateEntityOperation(Operation):
    '''Represent update entity operation.'''

    def __init__(
        self, entity_type, entity_key, attribute_name, old_value, new_value
    ):
        '''Initialise operation.

        *entity_type* should be the type of entity in string form (as returned
        from :attr:`ftrack_api.entity.base.Entity.entity_type`).

        *entity_key* should be the unique key for the entity and should follow
        the form returned from :func:`ftrack_api.inspection.primary_key`.

        *attribute_name* should be the string name of the attribute being
        modified and *old_value* and *new_value* should reflect the change in
        value.

        .. note::

            Shallow copies will be made of both *old_value* and *new_value*.

        '''
        super(UpdateEntityOperation, self).__init__()
        self.entity_type = entity_type
        self.entity_key = entity_key
        self.attribute_name = attribute_name
        self.old_value = copy.copy(old_value)
        self.new_value = copy.copy(new_value)


class DeleteEntityOperation(Operation):
    '''Represent delete entity operation.'''

    def __init__(self, entity_type, entity_key):
        '''Initialise operation.

        *entity_type* should be the type of entity in string form (as returned
        from :attr:`ftrack_api.entity.base.Entity.entity_type`).

        *entity_key* should be the unique key for the entity and should follow
        the form returned from :func:`ftrack_api.inspection.primary_key`.

        '''
        super(DeleteEntityOperation, self).__init__()
        self.entity_type = entity_type
        self.entity_key = entity_key
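A self-contained sketch of the operations stack; the entity type and key below are hypothetical.

    import ftrack_api.operation

    operations = ftrack_api.operation.Operations()
    operations.push(
        ftrack_api.operation.UpdateEntityOperation(
            'Task', {'id': '123'}, 'name', 'old name', 'new name'
        )
    )

    print(len(operations))  # 1
    for operation in operations:
        print(operation.attribute_name, operation.new_value)

    operations.pop()
    print(len(operations))  # 0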
pype/vendor/ftrack_api/plugin.py (vendored, new file, 121 lines)
@@ -0,0 +1,121 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
import imp
|
||||
import inspect
|
||||
|
||||
|
||||
def discover(paths, positional_arguments=None, keyword_arguments=None):
|
||||
'''Find and load plugins in search *paths*.
|
||||
|
||||
Each discovered module should implement a register function that accepts
|
||||
*positional_arguments* and *keyword_arguments* as \*args and \*\*kwargs
|
||||
respectively.
|
||||
|
||||
If a register function does not accept variable arguments, then attempt to
|
||||
only pass accepted arguments to the function by inspecting its signature.
|
||||
|
||||
'''
|
||||
logger = logging.getLogger(__name__ + '.discover')
|
||||
|
||||
if positional_arguments is None:
|
||||
positional_arguments = []
|
||||
|
||||
if keyword_arguments is None:
|
||||
keyword_arguments = {}
|
||||
|
||||
for path in paths:
|
||||
# Ignore empty paths that could resolve to current directory.
|
||||
path = path.strip()
|
||||
if not path:
|
||||
continue
|
||||
|
||||
for base, directories, filenames in os.walk(path):
|
||||
for filename in filenames:
|
||||
name, extension = os.path.splitext(filename)
|
||||
if extension != '.py':
|
||||
continue
|
||||
|
||||
module_path = os.path.join(base, filename)
|
||||
unique_name = uuid.uuid4().hex
|
||||
|
||||
try:
|
||||
module = imp.load_source(unique_name, module_path)
|
||||
except Exception as error:
|
||||
logger.warning(
|
||||
'Failed to load plugin from "{0}": {1}'
|
||||
.format(module_path, error)
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
module.register
|
||||
except AttributeError:
|
||||
logger.warning(
|
||||
'Failed to load plugin that did not define a '
|
||||
'"register" function at the module level: {0}'
|
||||
.format(module_path)
|
||||
)
|
||||
else:
|
||||
# Attempt to only pass arguments that are accepted by the
|
||||
# register function.
|
||||
specification = inspect.getargspec(module.register)
|
||||
|
||||
selected_positional_arguments = positional_arguments
|
||||
selected_keyword_arguments = keyword_arguments
|
||||
|
||||
if (
|
||||
not specification.varargs and
|
||||
len(positional_arguments) > len(specification.args)
|
||||
):
|
||||
logger.warning(
|
||||
'Culling passed arguments to match register '
|
||||
'function signature.'
|
||||
)
|
||||
|
||||
selected_positional_arguments = positional_arguments[
|
||||
len(specification.args):
|
||||
]
|
||||
selected_keyword_arguments = {}
|
||||
|
||||
elif not specification.keywords:
|
||||
# Remove arguments that have been passed as positionals.
|
||||
remainder = specification.args[
|
||||
len(positional_arguments):
|
||||
]
|
||||
|
||||
# Determine remaining available keyword arguments.
|
||||
defined_keyword_arguments = []
|
||||
if specification.defaults:
|
||||
defined_keyword_arguments = specification.args[
|
||||
-len(specification.defaults):
|
||||
]
|
||||
|
||||
remaining_keyword_arguments = set([
|
||||
keyword_argument for keyword_argument
|
||||
in defined_keyword_arguments
|
||||
if keyword_argument in remainder
|
||||
])
|
||||
|
||||
if not set(keyword_arguments.keys()).issubset(
|
||||
remaining_keyword_arguments
|
||||
):
|
||||
logger.warning(
|
||||
'Culling passed arguments to match register '
|
||||
'function signature.'
|
||||
)
|
||||
selected_keyword_arguments = {
|
||||
key: value
|
||||
for key, value in list(keyword_arguments.items())
|
||||
if key in remaining_keyword_arguments
|
||||
}
|
||||
|
||||
module.register(
|
||||
*selected_positional_arguments,
|
||||
**selected_keyword_arguments
|
||||
)
|
||||
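A sketch of plugin discovery; the plugin directory and the register() signature shown in the comment are hypothetical.

    # ./plugins/example_plugin.py (hypothetical):
    #
    #     def register(session, priority=100):
    #         print('Registered with', session, priority)

    import ftrack_api.plugin

    # Arguments are culled to match each register() signature.
    ftrack_api.plugin.discover(
        ['./plugins'],
        positional_arguments=[object()],      # stand-in for a Session
        keyword_arguments={'priority': 50}
    )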
pype/vendor/ftrack_api/query.py (vendored, new file, 202 lines)
@@ -0,0 +1,202 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
import re
|
||||
import collections
|
||||
|
||||
import ftrack_api.exception
|
||||
|
||||
|
||||
class QueryResult(collections.Sequence):
|
||||
'''Results from a query.'''
|
||||
|
||||
OFFSET_EXPRESSION = re.compile('(?P<offset>offset (?P<value>\d+))')
|
||||
LIMIT_EXPRESSION = re.compile('(?P<limit>limit (?P<value>\d+))')
|
||||
|
||||
def __init__(self, session, expression, page_size=500):
|
||||
'''Initialise result set.
|
||||
|
||||
*session* should be an instance of :class:`ftrack_api.session.Session`
|
||||
that will be used for executing the query *expression*.
|
||||
|
||||
*page_size* should be an integer specifying the maximum number of
|
||||
records to fetch in one request allowing the results to be fetched
|
||||
incrementally in a transparent manner for optimal performance. Any
|
||||
offset or limit specified in *expression* are honoured for final result
|
||||
set, but intermediate queries may be issued with different offsets and
|
||||
limits in order to fetch pages. When an embedded limit is smaller than
|
||||
the given *page_size* it will be used instead and no paging will take
|
||||
place.
|
||||
|
||||
.. warning::
|
||||
|
||||
Setting *page_size* to a very large amount may negatively impact
|
||||
performance of not only the caller, but the server in general.
|
||||
|
||||
'''
|
||||
super(QueryResult, self).__init__()
|
||||
self._session = session
|
||||
self._results = []
|
||||
|
||||
(
|
||||
self._expression,
|
||||
self._offset,
|
||||
self._limit
|
||||
) = self._extract_offset_and_limit(expression)
|
||||
|
||||
self._page_size = page_size
|
||||
if self._limit is not None and self._limit < self._page_size:
|
||||
# Optimise case where embedded limit is less than fetching a
|
||||
# single page.
|
||||
self._page_size = self._limit
|
||||
|
||||
self._next_offset = self._offset
|
||||
if self._next_offset is None:
|
||||
# Initialise with zero offset.
|
||||
self._next_offset = 0
|
||||
|
||||
def _extract_offset_and_limit(self, expression):
|
||||
'''Process *expression* extracting offset and limit.
|
||||
|
||||
Return (expression, offset, limit).
|
||||
|
||||
'''
|
||||
offset = None
|
||||
match = self.OFFSET_EXPRESSION.search(expression)
|
||||
if match:
|
||||
offset = int(match.group('value'))
|
||||
expression = (
|
||||
expression[:match.start('offset')] +
|
||||
expression[match.end('offset'):]
|
||||
)
|
||||
|
||||
limit = None
|
||||
match = self.LIMIT_EXPRESSION.search(expression)
|
||||
if match:
|
||||
limit = int(match.group('value'))
|
||||
expression = (
|
||||
expression[:match.start('limit')] +
|
||||
expression[match.end('limit'):]
|
||||
)
|
||||
|
||||
return expression.strip(), offset, limit
|
||||
|
||||
def __getitem__(self, index):
|
||||
'''Return value at *index*.'''
|
||||
while self._can_fetch_more() and index >= len(self._results):
|
||||
self._fetch_more()
|
||||
|
||||
return self._results[index]
|
||||
|
||||
def __len__(self):
|
||||
'''Return number of items.'''
|
||||
while self._can_fetch_more():
|
||||
self._fetch_more()
|
||||
|
||||
return len(self._results)
|
||||
|
||||
def _can_fetch_more(self):
|
||||
'''Return whether more results are available to fetch.'''
|
||||
return self._next_offset is not None
|
||||
|
||||
def _fetch_more(self):
|
||||
'''Fetch next page of results if available.'''
|
||||
if not self._can_fetch_more():
|
||||
return
|
||||
|
||||
expression = '{0} offset {1} limit {2}'.format(
|
||||
self._expression, self._next_offset, self._page_size
|
||||
)
|
||||
records, metadata = self._session._query(expression)
|
||||
self._results.extend(records)
|
||||
|
||||
if self._limit is not None and (len(self._results) >= self._limit):
|
||||
# Original limit reached.
|
||||
self._next_offset = None
|
||||
del self._results[self._limit:]
|
||||
else:
|
||||
# Retrieve next page offset from returned metadata.
|
||||
self._next_offset = metadata.get('next', {}).get('offset', None)
|
||||
|
||||
def all(self):
|
||||
'''Fetch and return all data.'''
|
||||
return list(self)
|
||||
|
||||
def one(self):
|
||||
'''Return exactly one single result from query by applying a limit.
|
||||
|
||||
Raise :exc:`ValueError` if an existing limit is already present in the
|
||||
expression.
|
||||
|
||||
Raise :exc:`ValueError` if an existing offset is already present in the
|
||||
expression as offset is inappropriate when expecting a single item.
|
||||
|
||||
Raise :exc:`~ftrack_api.exception.MultipleResultsFoundError` if more
|
||||
than one result was available or
|
||||
:exc:`~ftrack_api.exception.NoResultFoundError` if no results were
|
||||
available.
|
||||
|
||||
.. note::
|
||||
|
||||
Both errors subclass
|
||||
:exc:`~ftrack_api.exception.IncorrectResultError` if you want to
|
||||
catch only one error type.
|
||||
|
||||
'''
|
||||
expression = self._expression
|
||||
|
||||
if self._limit is not None:
|
||||
raise ValueError(
|
||||
'Expression already contains a limit clause.'
|
||||
)
|
||||
|
||||
if self._offset is not None:
|
||||
raise ValueError(
|
||||
'Expression contains an offset clause which does not make '
|
||||
'sense when selecting a single item.'
|
||||
)
|
||||
|
||||
# Apply custom limit as optimisation. A limit of 2 is used rather than
|
||||
# 1 so that it is possible to test for multiple matching entries
|
||||
# case.
|
||||
expression += ' limit 2'
|
||||
|
||||
results, metadata = self._session._query(expression)
|
||||
|
||||
if not results:
|
||||
raise ftrack_api.exception.NoResultFoundError()
|
||||
|
||||
if len(results) != 1:
|
||||
raise ftrack_api.exception.MultipleResultsFoundError()
|
||||
|
||||
return results[0]
|
||||
|
||||
def first(self):
|
||||
'''Return first matching result from query by applying a limit.
|
||||
|
||||
Raise :exc:`ValueError` if an existing limit is already present in the
|
||||
expression.
|
||||
|
||||
If no matching result available return None.
|
||||
|
||||
'''
|
||||
expression = self._expression
|
||||
|
||||
if self._limit is not None:
|
||||
raise ValueError(
|
||||
'Expression already contains a limit clause.'
|
||||
)
|
||||
|
||||
# Apply custom offset if present.
|
||||
if self._offset is not None:
|
||||
expression += ' offset {0}'.format(self._offset)
|
||||
|
||||
# Apply custom limit as optimisation.
|
||||
expression += ' limit 1'
|
||||
|
||||
results, metadata = self._session._query(expression)
|
||||
|
||||
if results:
|
||||
return results[0]
|
||||
|
||||
return None
|
||||
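A usage sketch for QueryResult, assuming a reachable ftrack server; the query expressions are illustrative.

    import ftrack_api

    session = ftrack_api.Session()

    # Results are fetched page by page as the result set is iterated.
    for project in session.query('Project'):
        print(project['full_name'])

    # first() returns None when nothing matches; one() demands a single hit.
    project = session.query('Project where name is "example"').first()
    if project is None:
        print('No such project.')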
pype/vendor/ftrack_api/resource_identifier_transformer/__init__.py (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
pype/vendor/ftrack_api/resource_identifier_transformer/base.py (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

from builtins import object


class ResourceIdentifierTransformer(object):
    '''Transform resource identifiers.

    Provide ability to modify resource identifier before it is stored
    centrally (:meth:`encode`), or after it has been retrieved, but before it
    is used locally (:meth:`decode`).

    For example, you might want to decompose paths into a set of key, value
    pairs to store centrally and then compose a path from those values when
    reading back.

    .. note::

        This is separate from any transformations an
        :class:`ftrack_api.accessor.base.Accessor` may perform and is targeted
        towards common transformations.

    '''

    def __init__(self, session):
        '''Initialise resource identifier transformer.

        *session* should be the :class:`ftrack_api.session.Session` instance
        to use for communication with the server.

        '''
        self.session = session
        super(ResourceIdentifierTransformer, self).__init__()

    def encode(self, resource_identifier, context=None):
        '''Return encoded *resource_identifier* for storing centrally.

        A mapping of *context* values may be supplied to guide the
        transformation.

        '''
        return resource_identifier

    def decode(self, resource_identifier, context=None):
        '''Return decoded *resource_identifier* for use locally.

        A mapping of *context* values may be supplied to guide the
        transformation.

        '''
        return resource_identifier
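A sketch of a custom transformer built on this base class; the mount prefix is purely illustrative.

    from ftrack_api.resource_identifier_transformer.base import (
        ResourceIdentifierTransformer
    )

    class MountPrefixTransformer(ResourceIdentifierTransformer):
        '''Store paths centrally without the local mount point.'''

        prefix = '/mnt/projects/'

        def encode(self, resource_identifier, context=None):
            return resource_identifier.replace(self.prefix, '', 1)

        def decode(self, resource_identifier, context=None):
            return self.prefix + resource_identifier

    transformer = MountPrefixTransformer(session=None)  # session unused here
    print(transformer.encode('/mnt/projects/show/shot/plate.exr'))
    # show/shot/plate.exr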
pype/vendor/ftrack_api/session.py (vendored, new file, 2468 lines)
File diff suppressed because it is too large.
pype/vendor/ftrack_api/structure/__init__.py (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
pype/vendor/ftrack_api/structure/base.py (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

from builtins import object
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass


class Structure(with_metaclass(ABCMeta, object)):
    '''Structure plugin interface.

    A structure plugin should compute appropriate paths for data.

    '''

    def __init__(self, prefix=''):
        '''Initialise structure.'''
        self.prefix = prefix
        self.path_separator = '/'
        super(Structure, self).__init__()

    @abstractmethod
    def get_resource_identifier(self, entity, context=None):
        '''Return a resource identifier for supplied *entity*.

        *context* can be a mapping that supplies additional information.

        '''

    def _get_sequence_expression(self, sequence):
        '''Return a sequence expression for *sequence* component.'''
        padding = sequence['padding']
        if padding:
            expression = '%0{0}d'.format(padding)
        else:
            expression = '%d'

        return expression
pype/vendor/ftrack_api/structure/entity_id.py (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack

import ftrack_api.structure.base


class EntityIdStructure(ftrack_api.structure.base.Structure):
    '''Entity id pass-through structure.'''

    def get_resource_identifier(self, entity, context=None):
        '''Return a *resourceIdentifier* for supplied *entity*.'''
        return entity['id']
pype/vendor/ftrack_api/structure/id.py (vendored, new file, 91 lines)
@@ -0,0 +1,91 @@
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2014 ftrack
|
||||
|
||||
import os
|
||||
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.structure.base
|
||||
|
||||
|
||||
class IdStructure(ftrack_api.structure.base.Structure):
|
||||
'''Id based structure supporting Components only.
|
||||
|
||||
A components unique id will be used to form a path to store the data at.
|
||||
To avoid millions of entries in one directory each id is chunked into four
|
||||
prefix directories with the remainder used to name the file::
|
||||
|
||||
/prefix/1/2/3/4/56789
|
||||
|
||||
If the component has a defined filetype it will be added to the path::
|
||||
|
||||
/prefix/1/2/3/4/56789.exr
|
||||
|
||||
Components that are children of container components will be placed inside
|
||||
the id structure of their parent::
|
||||
|
||||
/prefix/1/2/3/4/56789/355827648d.exr
|
||||
/prefix/1/2/3/4/56789/ajf24215b5.exr
|
||||
|
||||
However, sequence children will be named using their label as an index and
|
||||
a common prefix of 'file.'::
|
||||
|
||||
/prefix/1/2/3/4/56789/file.0001.exr
|
||||
/prefix/1/2/3/4/56789/file.0002.exr
|
||||
|
||||
'''
|
||||
|
||||
def get_resource_identifier(self, entity, context=None):
|
||||
'''Return a resource identifier for supplied *entity*.
|
||||
|
||||
*context* can be a mapping that supplies additional information.
|
||||
|
||||
'''
|
||||
if entity.entity_type in ('FileComponent',):
|
||||
# When in a container, place the file inside a directory named
|
||||
# after the container.
|
||||
container = entity['container']
|
||||
if container and container is not ftrack_api.symbol.NOT_SET:
|
||||
path = self.get_resource_identifier(container)
|
||||
|
||||
if container.entity_type in ('SequenceComponent',):
|
||||
# Label doubles as index for now.
|
||||
name = 'file.{0}{1}'.format(
|
||||
entity['name'], entity['file_type']
|
||||
)
|
||||
parts = [os.path.dirname(path), name]
|
||||
|
||||
else:
|
||||
# Just place uniquely identified file into directory
|
||||
name = entity['id'] + entity['file_type']
|
||||
parts = [path, name]
|
||||
|
||||
else:
|
||||
name = entity['id'][4:] + entity['file_type']
|
||||
parts = ([self.prefix] + list(entity['id'][:4]) + [name])
|
||||
|
||||
elif entity.entity_type in ('SequenceComponent',):
|
||||
name = 'file'
|
||||
|
||||
# Add a sequence identifier.
|
||||
sequence_expression = self._get_sequence_expression(entity)
|
||||
name += '.{0}'.format(sequence_expression)
|
||||
|
||||
if (
|
||||
entity['file_type'] and
|
||||
entity['file_type'] is not ftrack_api.symbol.NOT_SET
|
||||
):
|
||||
name += entity['file_type']
|
||||
|
||||
parts = ([self.prefix] + list(entity['id'][:4])
|
||||
+ [entity['id'][4:]] + [name])
|
||||
|
||||
elif entity.entity_type in ('ContainerComponent',):
|
||||
# Just an id directory
|
||||
parts = ([self.prefix] +
|
||||
list(entity['id'][:4]) + [entity['id'][4:]])
|
||||
|
||||
else:
|
||||
raise NotImplementedError('Cannot generate path for unsupported '
|
||||
'entity {0}'.format(entity))
|
||||
|
||||
return self.path_separator.join(parts).strip('/')
|
||||
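The `IdStructure` docstring above describes chunking a component's id into four prefix directories plus a file name. A minimal sketch of that mapping, with a hypothetical helper name and a made-up id, purely for illustration (not part of the vendored module):

def chunked_path(prefix, component_id, file_type=''):
    # First four characters of the id become one directory level each;
    # the remainder plus the optional file type names the file itself.
    directories = list(component_id[:4])
    filename = component_id[4:] + file_type
    return '/'.join([prefix] + directories + [filename])

print(chunked_path('prefix', '123456789', '.exr'))
# -> prefix/1/2/3/4/56789.exr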
28 pype/vendor/ftrack_api/structure/origin.py (vendored, new file)
@@ -0,0 +1,28 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack

from .base import Structure


class OriginStructure(Structure):
    '''Origin structure that passes through existing resource identifier.'''

    def get_resource_identifier(self, entity, context=None):
        '''Return a resource identifier for supplied *entity*.

        *context* should be a mapping that includes at least a
        'source_resource_identifier' key that refers to the resource identifier
        to pass through.

        '''
        if context is None:
            context = {}

        resource_identifier = context.get('source_resource_identifier')
        if resource_identifier is None:
            raise ValueError(
                'Could not generate resource identifier as no source resource '
                'identifier found in passed context.'
            )

        return resource_identifier
215 pype/vendor/ftrack_api/structure/standard.py (vendored, new file)
@@ -0,0 +1,215 @@
|
|||
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2015 ftrack
|
||||
|
||||
from builtins import str
|
||||
import os
|
||||
import re
|
||||
import unicodedata
|
||||
|
||||
import ftrack_api.symbol
|
||||
import ftrack_api.structure.base
|
||||
|
||||
|
||||
class StandardStructure(ftrack_api.structure.base.Structure):
|
||||
'''Project hierarchy based structure that only supports Components.
|
||||
|
||||
The resource identifier is generated from the project code, the name
|
||||
of objects in the project structure, asset name and version number::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003
|
||||
|
||||
If the component is a `FileComponent` then the name of the component and the
|
||||
file type are used as filename in the resource_identifier::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003/foo.jpg
|
||||
|
||||
If the component is a `SequenceComponent` then a sequence expression,
|
||||
`%04d`, is used. E.g. a component with the name `foo` yields::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003/foo.%04d.jpg
|
||||
|
||||
For the member components their index in the sequence is used::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003/foo.0042.jpg
|
||||
|
||||
The name of the component is added to the resource identifier if the
|
||||
component is a `ContainerComponent`. E.g. a container component with the
|
||||
name `bar` yields::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003/bar
|
||||
|
||||
For a member of that container the file name is based on the component name
|
||||
and file type::
|
||||
|
||||
my_project/folder_a/folder_b/asset_name/v003/bar/baz.pdf
|
||||
|
||||
'''
|
||||
|
||||
def __init__(
|
||||
self, project_versions_prefix=None, illegal_character_substitute='_'
|
||||
):
|
||||
'''Initialise structure.
|
||||
|
||||
If *project_versions_prefix* is defined, insert after the project code
|
||||
for versions published directly under the project::
|
||||
|
||||
my_project/<project_versions_prefix>/v001/foo.jpg
|
||||
|
||||
Replace illegal characters with *illegal_character_substitute* if
|
||||
defined.
|
||||
|
||||
.. note::
|
||||
|
||||
Nested component containers/sequences are not supported.
|
||||
|
||||
'''
|
||||
super(StandardStructure, self).__init__()
|
||||
self.project_versions_prefix = project_versions_prefix
|
||||
self.illegal_character_substitute = illegal_character_substitute
|
||||
|
||||
def _get_parts(self, entity):
|
||||
'''Return resource identifier parts from *entity*.'''
|
||||
session = entity.session
|
||||
|
||||
version = entity['version']
|
||||
|
||||
if version is ftrack_api.symbol.NOT_SET and entity['version_id']:
|
||||
version = session.get('AssetVersion', entity['version_id'])
|
||||
|
||||
error_message = (
|
||||
'Component {0!r} must be attached to a committed '
|
||||
'version and a committed asset with a parent context.'.format(
|
||||
entity
|
||||
)
|
||||
)
|
||||
|
||||
if (
|
||||
version is ftrack_api.symbol.NOT_SET or
|
||||
version in session.created
|
||||
):
|
||||
raise ftrack_api.exception.StructureError(error_message)
|
||||
|
||||
link = version['link']
|
||||
|
||||
if not link:
|
||||
raise ftrack_api.exception.StructureError(error_message)
|
||||
|
||||
structure_names = [
|
||||
item['name']
|
||||
for item in link[1:-1]
|
||||
]
|
||||
|
||||
project_id = link[0]['id']
|
||||
project = session.get('Project', project_id)
|
||||
asset = version['asset']
|
||||
|
||||
version_number = self._format_version(version['version'])
|
||||
|
||||
parts = []
|
||||
parts.append(project['name'])
|
||||
|
||||
if structure_names:
|
||||
parts.extend(structure_names)
|
||||
elif self.project_versions_prefix:
|
||||
# Add *project_versions_prefix* if configured and the version is
|
||||
# published directly under the project.
|
||||
parts.append(self.project_versions_prefix)
|
||||
|
||||
parts.append(asset['name'])
|
||||
parts.append(version_number)
|
||||
|
||||
return [self.sanitise_for_filesystem(part) for part in parts]
|
||||
|
||||
def _format_version(self, number):
|
||||
'''Return a formatted string representing version *number*.'''
|
||||
return 'v{0:03d}'.format(number)
|
||||
|
||||
def sanitise_for_filesystem(self, value):
|
||||
'''Return *value* with illegal filesystem characters replaced.
|
||||
|
||||
An illegal character is one that is not typically valid for filesystem
|
||||
usage, such as non ascii characters, or can be awkward to use in a
|
||||
filesystem, such as spaces. Replace these characters with
|
||||
the character specified by *illegal_character_substitute* on
|
||||
initialisation. If no character was specified as substitute then return
|
||||
*value* unmodified.
|
||||
|
||||
'''
|
||||
if self.illegal_character_substitute is None:
|
||||
return value
|
||||
|
||||
value = unicodedata.normalize('NFKD', str(value)).encode('ascii', 'ignore')
|
||||
value = re.sub('[^\w\.-]', self.illegal_character_substitute, value.decode('utf-8'))
|
||||
return str(value.strip().lower())
|
||||
|
||||
def get_resource_identifier(self, entity, context=None):
|
||||
'''Return a resource identifier for supplied *entity*.
|
||||
|
||||
*context* can be a mapping that supplies additional information, but
|
||||
is unused in this implementation.
|
||||
|
||||
|
||||
Raise a :py:exc:`ftrack_api.exception.StructureError` if *entity* is not
|
||||
attached to a committed version and a committed asset with a parent
|
||||
context.
|
||||
|
||||
'''
|
||||
if entity.entity_type in ('FileComponent',):
|
||||
container = entity['container']
|
||||
|
||||
if container:
|
||||
# Get resource identifier for container.
|
||||
container_path = self.get_resource_identifier(container)
|
||||
|
||||
if container.entity_type in ('SequenceComponent',):
|
||||
# Strip the sequence component expression from the parent
|
||||
# container and back the correct filename, i.e.
|
||||
# /sequence/component/sequence_component_name.0012.exr.
|
||||
name = '{0}.{1}{2}'.format(
|
||||
container['name'], entity['name'], entity['file_type']
|
||||
)
|
||||
parts = [
|
||||
os.path.dirname(container_path),
|
||||
self.sanitise_for_filesystem(name)
|
||||
]
|
||||
|
||||
else:
|
||||
# Container is not a sequence component so add it as a
|
||||
# normal component inside the container.
|
||||
name = entity['name'] + entity['file_type']
|
||||
parts = [
|
||||
container_path, self.sanitise_for_filesystem(name)
|
||||
]
|
||||
|
||||
else:
|
||||
# File component does not have a container, construct name from
|
||||
# component name and file type.
|
||||
parts = self._get_parts(entity)
|
||||
name = entity['name'] + entity['file_type']
|
||||
parts.append(self.sanitise_for_filesystem(name))
|
||||
|
||||
elif entity.entity_type in ('SequenceComponent',):
|
||||
# Create sequence expression for the sequence component and add it
|
||||
# to the parts.
|
||||
parts = self._get_parts(entity)
|
||||
sequence_expression = self._get_sequence_expression(entity)
|
||||
parts.append(
|
||||
'{0}.{1}{2}'.format(
|
||||
self.sanitise_for_filesystem(entity['name']),
|
||||
sequence_expression,
|
||||
self.sanitise_for_filesystem(entity['file_type'])
|
||||
)
|
||||
)
|
||||
|
||||
elif entity.entity_type in ('ContainerComponent',):
|
||||
# Add the name of the container to the resource identifier parts.
|
||||
parts = self._get_parts(entity)
|
||||
parts.append(self.sanitise_for_filesystem(entity['name']))
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'Cannot generate resource identifier for unsupported '
|
||||
'entity {0!r}'.format(entity)
|
||||
)
|
||||
|
||||
return self.path_separator.join(parts)
|
||||
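The `StandardStructure` docstring and `sanitise_for_filesystem` above explain how resource identifiers are assembled from project hierarchy names and cleaned for filesystem use. A short sketch of that behaviour with made-up values; the helper below is an assumption-based paraphrase, not the vendored method:

import re
import unicodedata

def sanitise(value, substitute='_'):
    # ASCII-fold, then replace anything outside word characters, dots and
    # dashes, mirroring the sanitise_for_filesystem idea above.
    value = unicodedata.normalize('NFKD', str(value)).encode('ascii', 'ignore')
    value = re.sub(r'[^\w.-]', substitute, value.decode('utf-8'))
    return value.strip().lower()

parts = ['My Project', 'folder a', 'folder b', 'Asset Name', 'v003', 'foo.jpg']
print('/'.join(sanitise(part) for part in parts))
# -> my_project/folder_a/folder_b/asset_name/v003/foo.jpg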
76 pype/vendor/ftrack_api/symbol.py (vendored, new file)
@@ -0,0 +1,76 @@
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack


from builtins import object
class Symbol(object):
    '''A constant symbol.'''

    def __init__(self, name, value=True):
        '''Initialise symbol with unique *name* and *value*.

        *value* is used for nonzero testing.

        '''
        self.name = name
        self.value = value

    def __str__(self):
        '''Return string representation.'''
        return self.name

    def __repr__(self):
        '''Return representation.'''
        return '{0}({1})'.format(self.__class__.__name__, self.name)

    def __bool__(self):
        '''Return whether symbol represents non-zero value.'''
        return bool(self.value)

    def __copy__(self):
        '''Return shallow copy.

        Overridden to always return same instance.

        '''
        return self


#: Symbol representing that no value has been set or loaded.
NOT_SET = Symbol('NOT_SET', False)

#: Symbol representing created state.
CREATED = Symbol('CREATED')

#: Symbol representing modified state.
MODIFIED = Symbol('MODIFIED')

#: Symbol representing deleted state.
DELETED = Symbol('DELETED')

#: Topic published when component added to a location.
COMPONENT_ADDED_TO_LOCATION_TOPIC = 'ftrack.location.component-added'

#: Topic published when component removed from a location.
COMPONENT_REMOVED_FROM_LOCATION_TOPIC = 'ftrack.location.component-removed'

#: Identifier of builtin origin location.
ORIGIN_LOCATION_ID = 'ce9b348f-8809-11e3-821c-20c9d081909b'

#: Identifier of builtin unmanaged location.
UNMANAGED_LOCATION_ID = 'cb268ecc-8809-11e3-a7e2-20c9d081909b'

#: Identifier of builtin review location.
REVIEW_LOCATION_ID = 'cd41be70-8809-11e3-b98a-20c9d081909b'

#: Identifier of builtin connect location.
CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b'

#: Identifier of builtin server location.
SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b'

#: Chunk size used when working with data.
CHUNK_SIZE = 8192

#: Symbol representing syncing users with ldap
JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP')
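A brief usage sketch of the `Symbol` sentinels defined above, assuming the vendored `ftrack_api` package is importable; the output comments are illustrative:

import ftrack_api.symbol

value = ftrack_api.symbol.NOT_SET

# NOT_SET is falsy because it was constructed with value=False ...
if not value:
    print('no value loaded yet')

# ... but it stays distinguishable from None or an empty string.
print(value is None)                        # False
print(value is ftrack_api.symbol.NOT_SET)   # True
print(repr(value))                          # Symbol(NOT_SET)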
93 pype/vendor/future/__init__.py (vendored, new file)
@@ -0,0 +1,93 @@
|
|||
"""
|
||||
future: Easy, safe support for Python 2/3 compatibility
|
||||
=======================================================
|
||||
|
||||
``future`` is the missing compatibility layer between Python 2 and Python
|
||||
3. It allows you to use a single, clean Python 3.x-compatible codebase to
|
||||
support both Python 2 and Python 3 with minimal overhead.
|
||||
|
||||
It is designed to be used as follows::
|
||||
|
||||
from __future__ import (absolute_import, division,
|
||||
print_function, unicode_literals)
|
||||
from builtins import (
|
||||
bytes, dict, int, list, object, range, str,
|
||||
ascii, chr, hex, input, next, oct, open,
|
||||
pow, round, super,
|
||||
filter, map, zip)
|
||||
|
||||
followed by predominantly standard, idiomatic Python 3 code that then runs
|
||||
similarly on Python 2.6/2.7 and Python 3.3+.
|
||||
|
||||
The imports have no effect on Python 3. On Python 2, they shadow the
|
||||
corresponding builtins, which normally have different semantics on Python 3
|
||||
versus 2, to provide their Python 3 semantics.
|
||||
|
||||
|
||||
Standard library reorganization
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
``future`` supports the standard library reorganization (PEP 3108) through the
|
||||
following Py3 interfaces:
|
||||
|
||||
>>> # Top-level packages with Py3 names provided on Py2:
|
||||
>>> import html.parser
|
||||
>>> import queue
|
||||
>>> import tkinter.dialog
|
||||
>>> import xmlrpc.client
|
||||
>>> # etc.
|
||||
|
||||
>>> # Aliases provided for extensions to existing Py2 module names:
|
||||
>>> from future.standard_library import install_aliases
|
||||
>>> install_aliases()
|
||||
|
||||
>>> from collections import Counter, OrderedDict # backported to Py2.6
|
||||
>>> from collections import UserDict, UserList, UserString
|
||||
>>> import urllib.request
|
||||
>>> from itertools import filterfalse, zip_longest
|
||||
>>> from subprocess import getoutput, getstatusoutput
|
||||
|
||||
|
||||
Automatic conversion
|
||||
--------------------
|
||||
|
||||
An included script called `futurize
|
||||
<http://python-future.org/automatic_conversion.html>`_ aids in converting
|
||||
code (from either Python 2 or Python 3) to code compatible with both
|
||||
platforms. It is similar to ``python-modernize`` but goes further in
|
||||
providing Python 3 compatibility through the use of the backported types
|
||||
and builtin functions in ``future``.
|
||||
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
See: http://python-future.org
|
||||
|
||||
|
||||
Credits
|
||||
-------
|
||||
|
||||
:Author: Ed Schofield
|
||||
:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
|
||||
Ltd, Singapore. http://pythoncharmers.com
|
||||
:Others: See docs/credits.rst or http://python-future.org/credits.html
|
||||
|
||||
|
||||
Licensing
|
||||
---------
|
||||
Copyright 2013-2016 Python Charmers Pty Ltd, Australia.
|
||||
The software is distributed under an MIT licence. See LICENSE.txt.
|
||||
|
||||
"""
|
||||
|
||||
__title__ = 'future'
|
||||
__author__ = 'Ed Schofield'
|
||||
__license__ = 'MIT'
|
||||
__copyright__ = 'Copyright 2013-2016 Python Charmers Pty Ltd'
|
||||
__ver_major__ = 0
|
||||
__ver_minor__ = 16
|
||||
__ver_patch__ = 0
|
||||
__ver_sub__ = ''
|
||||
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
|
||||
__ver_patch__, __ver_sub__)
|
||||
26 pype/vendor/future/backports/__init__.py (vendored, new file)
@@ -0,0 +1,26 @@
"""
future.backports package
"""

from __future__ import absolute_import

import sys

__future_module__ = True
from future.standard_library import import_top_level_modules


if sys.version_info[0] == 3:
    import_top_level_modules()


from .misc import (ceil,
                   OrderedDict,
                   Counter,
                   ChainMap,
                   check_output,
                   count,
                   recursive_repr,
                   _count_elements,
                   cmp_to_key
                   )
422 pype/vendor/future/backports/_markupbase.py (vendored, new file)
@@ -0,0 +1,422 @@
|
|||
"""Shared support for scanning document type declarations in HTML and XHTML.
|
||||
|
||||
Backported for python-future from Python 3.3. Reason: ParserBase is an
|
||||
old-style class in the Python 2.7 source of markupbase.py, which I suspect
|
||||
might be the cause of sporadic unit-test failures on travis-ci.org with
|
||||
test_htmlparser.py. The test failures look like this:
|
||||
|
||||
======================================================================
|
||||
|
||||
ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase)
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Traceback (most recent call last):
|
||||
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement
|
||||
[("starttag", "a", [("b", "&><\"'")])])
|
||||
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check
|
||||
collector = self.get_collector()
|
||||
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector
|
||||
return EventCollector(strict=True)
|
||||
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__
|
||||
html.parser.HTMLParser.__init__(self, *args, **kw)
|
||||
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__
|
||||
self.reset()
|
||||
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset
|
||||
_markupbase.ParserBase.reset(self)
|
||||
|
||||
TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead)
|
||||
|
||||
This module is used as a foundation for the html.parser module. It has no
|
||||
documented public API and should not be used directly.
|
||||
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
|
||||
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
|
||||
_commentclose = re.compile(r'--\s*>')
|
||||
_markedsectionclose = re.compile(r']\s*]\s*>')
|
||||
|
||||
# An analysis of the MS-Word extensions is available at
|
||||
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
|
||||
|
||||
_msmarkedsectionclose = re.compile(r']\s*>')
|
||||
|
||||
del re
|
||||
|
||||
|
||||
class ParserBase(object):
|
||||
"""Parser base class which provides some common support methods used
|
||||
by the SGML/HTML and XHTML parsers."""
|
||||
|
||||
def __init__(self):
|
||||
if self.__class__ is ParserBase:
|
||||
raise RuntimeError(
|
||||
"_markupbase.ParserBase must be subclassed")
|
||||
|
||||
def error(self, message):
|
||||
raise NotImplementedError(
|
||||
"subclasses of ParserBase must override error()")
|
||||
|
||||
def reset(self):
|
||||
self.lineno = 1
|
||||
self.offset = 0
|
||||
|
||||
def getpos(self):
|
||||
"""Return current line number and offset."""
|
||||
return self.lineno, self.offset
|
||||
|
||||
# Internal -- update line number and offset. This should be
|
||||
# called for each piece of data exactly once, in order -- in other
|
||||
# words the concatenation of all the input strings to this
|
||||
# function should be exactly the entire input.
|
||||
def updatepos(self, i, j):
|
||||
if i >= j:
|
||||
return j
|
||||
rawdata = self.rawdata
|
||||
nlines = rawdata.count("\n", i, j)
|
||||
if nlines:
|
||||
self.lineno = self.lineno + nlines
|
||||
pos = rawdata.rindex("\n", i, j) # Should not fail
|
||||
self.offset = j-(pos+1)
|
||||
else:
|
||||
self.offset = self.offset + j-i
|
||||
return j
|
||||
|
||||
_decl_otherchars = ''
|
||||
|
||||
# Internal -- parse declaration (for use by subclasses).
|
||||
def parse_declaration(self, i):
|
||||
# This is some sort of declaration; in "HTML as
|
||||
# deployed," this should only be the document type
|
||||
# declaration ("<!DOCTYPE html...>").
|
||||
# ISO 8879:1986, however, has more complex
|
||||
# declaration syntax for elements in <!...>, including:
|
||||
# --comment--
|
||||
# [marked section]
|
||||
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
|
||||
# ATTLIST, NOTATION, SHORTREF, USEMAP,
|
||||
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
|
||||
rawdata = self.rawdata
|
||||
j = i + 2
|
||||
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
|
||||
if rawdata[j:j+1] == ">":
|
||||
# the empty comment <!>
|
||||
return j + 1
|
||||
if rawdata[j:j+1] in ("-", ""):
|
||||
# Start of comment followed by buffer boundary,
|
||||
# or just a buffer boundary.
|
||||
return -1
|
||||
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
|
||||
n = len(rawdata)
|
||||
if rawdata[j:j+2] == '--': #comment
|
||||
# Locate --.*-- as the body of the comment
|
||||
return self.parse_comment(i)
|
||||
elif rawdata[j] == '[': #marked section
|
||||
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
|
||||
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
|
||||
# Note that this is extended by Microsoft Office "Save as Web" function
|
||||
# to include [if...] and [endif].
|
||||
return self.parse_marked_section(i)
|
||||
else: #all other declaration elements
|
||||
decltype, j = self._scan_name(j, i)
|
||||
if j < 0:
|
||||
return j
|
||||
if decltype == "doctype":
|
||||
self._decl_otherchars = ''
|
||||
while j < n:
|
||||
c = rawdata[j]
|
||||
if c == ">":
|
||||
# end of declaration syntax
|
||||
data = rawdata[i+2:j]
|
||||
if decltype == "doctype":
|
||||
self.handle_decl(data)
|
||||
else:
|
||||
# According to the HTML5 specs sections "8.2.4.44 Bogus
|
||||
# comment state" and "8.2.4.45 Markup declaration open
|
||||
# state", a comment token should be emitted.
|
||||
# Calling unknown_decl provides more flexibility though.
|
||||
self.unknown_decl(data)
|
||||
return j + 1
|
||||
if c in "\"'":
|
||||
m = _declstringlit_match(rawdata, j)
|
||||
if not m:
|
||||
return -1 # incomplete
|
||||
j = m.end()
|
||||
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
|
||||
name, j = self._scan_name(j, i)
|
||||
elif c in self._decl_otherchars:
|
||||
j = j + 1
|
||||
elif c == "[":
|
||||
# this could be handled in a separate doctype parser
|
||||
if decltype == "doctype":
|
||||
j = self._parse_doctype_subset(j + 1, i)
|
||||
elif decltype in set(["attlist", "linktype", "link", "element"]):
|
||||
# must tolerate []'d groups in a content model in an element declaration
|
||||
# also in data attribute specifications of attlist declaration
|
||||
# also link type declaration subsets in linktype declarations
|
||||
# also link attribute specification lists in link declarations
|
||||
self.error("unsupported '[' char in %s declaration" % decltype)
|
||||
else:
|
||||
self.error("unexpected '[' char in declaration")
|
||||
else:
|
||||
self.error(
|
||||
"unexpected %r char in declaration" % rawdata[j])
|
||||
if j < 0:
|
||||
return j
|
||||
return -1 # incomplete
|
||||
|
||||
# Internal -- parse a marked section
|
||||
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
|
||||
def parse_marked_section(self, i, report=1):
|
||||
rawdata= self.rawdata
|
||||
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
|
||||
sectName, j = self._scan_name( i+3, i )
|
||||
if j < 0:
|
||||
return j
|
||||
if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]):
|
||||
# look for standard ]]> ending
|
||||
match= _markedsectionclose.search(rawdata, i+3)
|
||||
elif sectName in set(["if", "else", "endif"]):
|
||||
# look for MS Office ]> ending
|
||||
match= _msmarkedsectionclose.search(rawdata, i+3)
|
||||
else:
|
||||
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
|
||||
if not match:
|
||||
return -1
|
||||
if report:
|
||||
j = match.start(0)
|
||||
self.unknown_decl(rawdata[i+3: j])
|
||||
return match.end(0)
|
||||
|
||||
# Internal -- parse comment, return length or -1 if not terminated
|
||||
def parse_comment(self, i, report=1):
|
||||
rawdata = self.rawdata
|
||||
if rawdata[i:i+4] != '<!--':
|
||||
self.error('unexpected call to parse_comment()')
|
||||
match = _commentclose.search(rawdata, i+4)
|
||||
if not match:
|
||||
return -1
|
||||
if report:
|
||||
j = match.start(0)
|
||||
self.handle_comment(rawdata[i+4: j])
|
||||
return match.end(0)
|
||||
|
||||
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
|
||||
# returning the index just past any whitespace following the trailing ']'.
|
||||
def _parse_doctype_subset(self, i, declstartpos):
|
||||
rawdata = self.rawdata
|
||||
n = len(rawdata)
|
||||
j = i
|
||||
while j < n:
|
||||
c = rawdata[j]
|
||||
if c == "<":
|
||||
s = rawdata[j:j+2]
|
||||
if s == "<":
|
||||
# end of buffer; incomplete
|
||||
return -1
|
||||
if s != "<!":
|
||||
self.updatepos(declstartpos, j + 1)
|
||||
self.error("unexpected char in internal subset (in %r)" % s)
|
||||
if (j + 2) == n:
|
||||
# end of buffer; incomplete
|
||||
return -1
|
||||
if (j + 4) > n:
|
||||
# end of buffer; incomplete
|
||||
return -1
|
||||
if rawdata[j:j+4] == "<!--":
|
||||
j = self.parse_comment(j, report=0)
|
||||
if j < 0:
|
||||
return j
|
||||
continue
|
||||
name, j = self._scan_name(j + 2, declstartpos)
|
||||
if j == -1:
|
||||
return -1
|
||||
if name not in set(["attlist", "element", "entity", "notation"]):
|
||||
self.updatepos(declstartpos, j + 2)
|
||||
self.error(
|
||||
"unknown declaration %r in internal subset" % name)
|
||||
# handle the individual names
|
||||
meth = getattr(self, "_parse_doctype_" + name)
|
||||
j = meth(j, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
elif c == "%":
|
||||
# parameter entity reference
|
||||
if (j + 1) == n:
|
||||
# end of buffer; incomplete
|
||||
return -1
|
||||
s, j = self._scan_name(j + 1, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
if rawdata[j] == ";":
|
||||
j = j + 1
|
||||
elif c == "]":
|
||||
j = j + 1
|
||||
while j < n and rawdata[j].isspace():
|
||||
j = j + 1
|
||||
if j < n:
|
||||
if rawdata[j] == ">":
|
||||
return j
|
||||
self.updatepos(declstartpos, j)
|
||||
self.error("unexpected char after internal subset")
|
||||
else:
|
||||
return -1
|
||||
elif c.isspace():
|
||||
j = j + 1
|
||||
else:
|
||||
self.updatepos(declstartpos, j)
|
||||
self.error("unexpected char %r in internal subset" % c)
|
||||
# end of buffer reached
|
||||
return -1
|
||||
|
||||
# Internal -- scan past <!ELEMENT declarations
|
||||
def _parse_doctype_element(self, i, declstartpos):
|
||||
name, j = self._scan_name(i, declstartpos)
|
||||
if j == -1:
|
||||
return -1
|
||||
# style content model; just skip until '>'
|
||||
rawdata = self.rawdata
|
||||
if '>' in rawdata[j:]:
|
||||
return rawdata.find(">", j) + 1
|
||||
return -1
|
||||
|
||||
# Internal -- scan past <!ATTLIST declarations
|
||||
def _parse_doctype_attlist(self, i, declstartpos):
|
||||
rawdata = self.rawdata
|
||||
name, j = self._scan_name(i, declstartpos)
|
||||
c = rawdata[j:j+1]
|
||||
if c == "":
|
||||
return -1
|
||||
if c == ">":
|
||||
return j + 1
|
||||
while 1:
|
||||
# scan a series of attribute descriptions; simplified:
|
||||
# name type [value] [#constraint]
|
||||
name, j = self._scan_name(j, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
c = rawdata[j:j+1]
|
||||
if c == "":
|
||||
return -1
|
||||
if c == "(":
|
||||
# an enumerated type; look for ')'
|
||||
if ")" in rawdata[j:]:
|
||||
j = rawdata.find(")", j) + 1
|
||||
else:
|
||||
return -1
|
||||
while rawdata[j:j+1].isspace():
|
||||
j = j + 1
|
||||
if not rawdata[j:]:
|
||||
# end of buffer, incomplete
|
||||
return -1
|
||||
else:
|
||||
name, j = self._scan_name(j, declstartpos)
|
||||
c = rawdata[j:j+1]
|
||||
if not c:
|
||||
return -1
|
||||
if c in "'\"":
|
||||
m = _declstringlit_match(rawdata, j)
|
||||
if m:
|
||||
j = m.end()
|
||||
else:
|
||||
return -1
|
||||
c = rawdata[j:j+1]
|
||||
if not c:
|
||||
return -1
|
||||
if c == "#":
|
||||
if rawdata[j:] == "#":
|
||||
# end of buffer
|
||||
return -1
|
||||
name, j = self._scan_name(j + 1, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
c = rawdata[j:j+1]
|
||||
if not c:
|
||||
return -1
|
||||
if c == '>':
|
||||
# all done
|
||||
return j + 1
|
||||
|
||||
# Internal -- scan past <!NOTATION declarations
|
||||
def _parse_doctype_notation(self, i, declstartpos):
|
||||
name, j = self._scan_name(i, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
rawdata = self.rawdata
|
||||
while 1:
|
||||
c = rawdata[j:j+1]
|
||||
if not c:
|
||||
# end of buffer; incomplete
|
||||
return -1
|
||||
if c == '>':
|
||||
return j + 1
|
||||
if c in "'\"":
|
||||
m = _declstringlit_match(rawdata, j)
|
||||
if not m:
|
||||
return -1
|
||||
j = m.end()
|
||||
else:
|
||||
name, j = self._scan_name(j, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
|
||||
# Internal -- scan past <!ENTITY declarations
|
||||
def _parse_doctype_entity(self, i, declstartpos):
|
||||
rawdata = self.rawdata
|
||||
if rawdata[i:i+1] == "%":
|
||||
j = i + 1
|
||||
while 1:
|
||||
c = rawdata[j:j+1]
|
||||
if not c:
|
||||
return -1
|
||||
if c.isspace():
|
||||
j = j + 1
|
||||
else:
|
||||
break
|
||||
else:
|
||||
j = i
|
||||
name, j = self._scan_name(j, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
while 1:
|
||||
c = self.rawdata[j:j+1]
|
||||
if not c:
|
||||
return -1
|
||||
if c in "'\"":
|
||||
m = _declstringlit_match(rawdata, j)
|
||||
if m:
|
||||
j = m.end()
|
||||
else:
|
||||
return -1 # incomplete
|
||||
elif c == ">":
|
||||
return j + 1
|
||||
else:
|
||||
name, j = self._scan_name(j, declstartpos)
|
||||
if j < 0:
|
||||
return j
|
||||
|
||||
# Internal -- scan a name token and the new position and the token, or
|
||||
# return -1 if we've reached the end of the buffer.
|
||||
def _scan_name(self, i, declstartpos):
|
||||
rawdata = self.rawdata
|
||||
n = len(rawdata)
|
||||
if i == n:
|
||||
return None, -1
|
||||
m = _declname_match(rawdata, i)
|
||||
if m:
|
||||
s = m.group()
|
||||
name = s.strip()
|
||||
if (i + len(s)) == n:
|
||||
return None, -1 # end of buffer
|
||||
return name.lower(), m.end()
|
||||
else:
|
||||
self.updatepos(declstartpos, i)
|
||||
self.error("expected name token at %r"
|
||||
% rawdata[declstartpos:declstartpos+20])
|
||||
|
||||
# To be overridden -- handlers for unknown objects
|
||||
def unknown_decl(self, data):
|
||||
pass
|
||||
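The docstring above notes that this module only exists as the foundation of `html.parser` and has no public API. As a rough illustration of where `parse_declaration` output ends up, here is a sketch using the standard-library parser rather than the backported one in this diff:

from html.parser import HTMLParser

class DeclPrinter(HTMLParser):
    def handle_decl(self, decl):
        # ParserBase.parse_declaration() routes DOCTYPE text here.
        print('declaration:', decl)

DeclPrinter().feed('<!DOCTYPE html><p>hello</p>')
# -> declaration: DOCTYPE html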
2152 pype/vendor/future/backports/datetime.py (vendored, new file)
File diff suppressed because it is too large.
78 pype/vendor/future/backports/email/__init__.py (vendored, new file)
@@ -0,0 +1,78 @@
|
|||
# Copyright (C) 2001-2007 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""
|
||||
Backport of the Python 3.3 email package for Python-Future.
|
||||
|
||||
A package for parsing, handling, and generating email messages.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Install the surrogate escape handler here because this is used by many
|
||||
# modules in the email package.
|
||||
from future.utils import surrogateescape
|
||||
surrogateescape.register_surrogateescape()
|
||||
# (Should this be done globally by ``future``?)
|
||||
|
||||
|
||||
__version__ = '5.1.0'
|
||||
|
||||
__all__ = [
|
||||
'base64mime',
|
||||
'charset',
|
||||
'encoders',
|
||||
'errors',
|
||||
'feedparser',
|
||||
'generator',
|
||||
'header',
|
||||
'iterators',
|
||||
'message',
|
||||
'message_from_file',
|
||||
'message_from_binary_file',
|
||||
'message_from_string',
|
||||
'message_from_bytes',
|
||||
'mime',
|
||||
'parser',
|
||||
'quoprimime',
|
||||
'utils',
|
||||
]
|
||||
|
||||
|
||||
|
||||
# Some convenience routines. Don't import Parser and Message as side-effects
|
||||
# of importing email since those cascadingly import most of the rest of the
|
||||
# email package.
|
||||
def message_from_string(s, *args, **kws):
|
||||
"""Parse a string into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from future.backports.email.parser import Parser
|
||||
return Parser(*args, **kws).parsestr(s)
|
||||
|
||||
def message_from_bytes(s, *args, **kws):
|
||||
"""Parse a bytes string into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from future.backports.email.parser import BytesParser
|
||||
return BytesParser(*args, **kws).parsebytes(s)
|
||||
|
||||
def message_from_file(fp, *args, **kws):
|
||||
"""Read a file and parse its contents into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from future.backports.email.parser import Parser
|
||||
return Parser(*args, **kws).parse(fp)
|
||||
|
||||
def message_from_binary_file(fp, *args, **kws):
|
||||
"""Read a binary file and parse its contents into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from future.backports.email.parser import BytesParser
|
||||
return BytesParser(*args, **kws).parse(fp)
|
||||
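The convenience parsers above mirror the standard-library `email` package; a minimal usage sketch with made-up message content (using the stdlib equivalent, not the backport itself):

import email

raw = 'Subject: Render finished\nFrom: pipeline@example.com\n\nAll frames done.'
msg = email.message_from_string(raw)

print(msg['Subject'])     # Render finished
print(msg.get_payload())  # All frames done.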
232 pype/vendor/future/backports/email/_encoded_words.py (vendored, new file)
@@ -0,0 +1,232 @@
|
|||
""" Routines for manipulating RFC2047 encoded words.
|
||||
|
||||
This is currently a package-private API, but will be considered for promotion
|
||||
to a public API if there is demand.
|
||||
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import bytes
|
||||
from future.builtins import chr
|
||||
from future.builtins import int
|
||||
from future.builtins import str
|
||||
|
||||
# An encoded word looks like this:
|
||||
#
|
||||
# =?charset[*lang]?cte?encoded_string?=
|
||||
#
|
||||
# for more information about charset see the charset module. Here it is one
|
||||
# of the preferred MIME charset names (hopefully; you never know when parsing).
|
||||
# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
|
||||
# theory other letters could be used for other encodings, but in practice this
|
||||
# (almost?) never happens. There could be a public API for adding entries
|
||||
# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
|
||||
# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
|
||||
# as indicated by the brackets (they are not part of the syntax) but is almost
|
||||
# never encountered in practice.
|
||||
#
|
||||
# The general interface for a CTE decoder is that it takes the encoded_string
|
||||
# as its argument, and returns a tuple (cte_decoded_string, defects). The
|
||||
# cte_decoded_string is the original binary that was encoded using the
|
||||
# specified cte. 'defects' is a list of MessageDefect instances indicating any
|
||||
# problems encountered during conversion. 'charset' and 'lang' are the
|
||||
# corresponding strings extracted from the EW, case preserved.
|
||||
#
|
||||
# The general interface for a CTE encoder is that it takes a binary sequence
|
||||
# as input and returns the cte_encoded_string, which is an ascii-only string.
|
||||
#
|
||||
# Each decoder must also supply a length function that takes the binary
|
||||
# sequence as its argument and returns the length of the resulting encoded
|
||||
# string.
|
||||
#
|
||||
# The main API functions for the module are decode, which calls the decoder
|
||||
# referenced by the cte specifier, and encode, which adds the appropriate
|
||||
# RFC 2047 "chrome" to the encoded string, and can optionally automatically
|
||||
# select the shortest possible encoding. See their docstrings below for
|
||||
# details.
|
||||
|
||||
import re
|
||||
import base64
|
||||
import binascii
|
||||
import functools
|
||||
from string import ascii_letters, digits
|
||||
from future.backports.email import errors
|
||||
|
||||
__all__ = ['decode_q',
|
||||
'encode_q',
|
||||
'decode_b',
|
||||
'encode_b',
|
||||
'len_q',
|
||||
'len_b',
|
||||
'decode',
|
||||
'encode',
|
||||
]
|
||||
|
||||
#
|
||||
# Quoted Printable
|
||||
#
|
||||
|
||||
# regex based decoder.
|
||||
_q_byte_subber = functools.partial(re.compile(br'=([a-fA-F0-9]{2})').sub,
|
||||
lambda m: bytes([int(m.group(1), 16)]))
|
||||
|
||||
def decode_q(encoded):
|
||||
encoded = bytes(encoded.replace(b'_', b' '))
|
||||
return _q_byte_subber(encoded), []
|
||||
|
||||
|
||||
# dict mapping bytes to their encoded form
|
||||
class _QByteMap(dict):
|
||||
|
||||
safe = bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'))
|
||||
|
||||
def __missing__(self, key):
|
||||
if key in self.safe:
|
||||
self[key] = chr(key)
|
||||
else:
|
||||
self[key] = "={:02X}".format(key)
|
||||
return self[key]
|
||||
|
||||
_q_byte_map = _QByteMap()
|
||||
|
||||
# In headers spaces are mapped to '_'.
|
||||
_q_byte_map[ord(' ')] = '_'
|
||||
|
||||
def encode_q(bstring):
|
||||
return str(''.join(_q_byte_map[x] for x in bytes(bstring)))
|
||||
|
||||
def len_q(bstring):
|
||||
return sum(len(_q_byte_map[x]) for x in bytes(bstring))
|
||||
|
||||
|
||||
#
|
||||
# Base64
|
||||
#
|
||||
|
||||
def decode_b(encoded):
|
||||
defects = []
|
||||
pad_err = len(encoded) % 4
|
||||
if pad_err:
|
||||
defects.append(errors.InvalidBase64PaddingDefect())
|
||||
padded_encoded = encoded + b'==='[:4-pad_err]
|
||||
else:
|
||||
padded_encoded = encoded
|
||||
try:
|
||||
# The validate kwarg to b64decode is not supported in Py2.x
|
||||
if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', padded_encoded):
|
||||
raise binascii.Error('Non-base64 digit found')
|
||||
return base64.b64decode(padded_encoded), defects
|
||||
except binascii.Error:
|
||||
# Since we had correct padding, this must be an invalid char error.
|
||||
defects = [errors.InvalidBase64CharactersDefect()]
|
||||
# The non-alphabet characters are ignored as far as padding
|
||||
# goes, but we don't know how many there are. So we'll just
|
||||
# try various padding lengths until something works.
|
||||
for i in 0, 1, 2, 3:
|
||||
try:
|
||||
return base64.b64decode(encoded+b'='*i), defects
|
||||
except (binascii.Error, TypeError): # Py2 raises a TypeError
|
||||
if i==0:
|
||||
defects.append(errors.InvalidBase64PaddingDefect())
|
||||
else:
|
||||
# This should never happen.
|
||||
raise AssertionError("unexpected binascii.Error")
|
||||
|
||||
def encode_b(bstring):
|
||||
return base64.b64encode(bstring).decode('ascii')
|
||||
|
||||
def len_b(bstring):
|
||||
groups_of_3, leftover = divmod(len(bstring), 3)
|
||||
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
|
||||
return groups_of_3 * 4 + (4 if leftover else 0)
|
||||
|
||||
|
||||
_cte_decoders = {
|
||||
'q': decode_q,
|
||||
'b': decode_b,
|
||||
}
|
||||
|
||||
def decode(ew):
|
||||
"""Decode encoded word and return (string, charset, lang, defects) tuple.
|
||||
|
||||
An RFC 2047/2243 encoded word has the form:
|
||||
|
||||
=?charset*lang?cte?encoded_string?=
|
||||
|
||||
where '*lang' may be omitted but the other parts may not be.
|
||||
|
||||
This function expects exactly such a string (that is, it does not check the
|
||||
syntax and may raise errors if the string is not well formed), and returns
|
||||
the encoded_string decoded first from its Content Transfer Encoding and
|
||||
then from the resulting bytes into unicode using the specified charset. If
|
||||
the cte-decoded string does not successfully decode using the specified
|
||||
character set, a defect is added to the defects list and the unknown octets
|
||||
are replaced by the unicode 'unknown' character \uFDFF.
|
||||
|
||||
The specified charset and language are returned. The default for language,
|
||||
which is rarely if ever encountered, is the empty string.
|
||||
|
||||
"""
|
||||
_, charset, cte, cte_string, _ = str(ew).split('?')
|
||||
charset, _, lang = charset.partition('*')
|
||||
cte = cte.lower()
|
||||
# Recover the original bytes and do CTE decoding.
|
||||
bstring = cte_string.encode('ascii', 'surrogateescape')
|
||||
bstring, defects = _cte_decoders[cte](bstring)
|
||||
# Turn the CTE decoded bytes into unicode.
|
||||
try:
|
||||
string = bstring.decode(charset)
|
||||
except UnicodeError:
|
||||
defects.append(errors.UndecodableBytesDefect("Encoded word "
|
||||
"contains bytes not decodable using {} charset".format(charset)))
|
||||
string = bstring.decode(charset, 'surrogateescape')
|
||||
except LookupError:
|
||||
string = bstring.decode('ascii', 'surrogateescape')
|
||||
if charset.lower() != 'unknown-8bit':
|
||||
defects.append(errors.CharsetError("Unknown charset {} "
|
||||
"in encoded word; decoded as unknown bytes".format(charset)))
|
||||
return string, charset, lang, defects
|
||||
|
||||
|
||||
_cte_encoders = {
|
||||
'q': encode_q,
|
||||
'b': encode_b,
|
||||
}
|
||||
|
||||
_cte_encode_length = {
|
||||
'q': len_q,
|
||||
'b': len_b,
|
||||
}
|
||||
|
||||
def encode(string, charset='utf-8', encoding=None, lang=''):
|
||||
"""Encode string using the CTE encoding that produces the shorter result.
|
||||
|
||||
Produces an RFC 2047/2243 encoded word of the form:
|
||||
|
||||
=?charset*lang?cte?encoded_string?=
|
||||
|
||||
where '*lang' is omitted unless the 'lang' parameter is given a value.
|
||||
Optional argument charset (defaults to utf-8) specifies the charset to use
|
||||
to encode the string to binary before CTE encoding it. Optional argument
|
||||
'encoding' is the cte specifier for the encoding that should be used ('q'
|
||||
or 'b'); if it is None (the default) the encoding which produces the
|
||||
shortest encoded sequence is used, except that 'q' is preferred if it is up
|
||||
to five characters longer. Optional argument 'lang' (default '') gives the
|
||||
RFC 2243 language string to specify in the encoded word.
|
||||
|
||||
"""
|
||||
string = str(string)
|
||||
if charset == 'unknown-8bit':
|
||||
bstring = string.encode('ascii', 'surrogateescape')
|
||||
else:
|
||||
bstring = string.encode(charset)
|
||||
if encoding is None:
|
||||
qlen = _cte_encode_length['q'](bstring)
|
||||
blen = _cte_encode_length['b'](bstring)
|
||||
# Bias toward q. 5 is arbitrary.
|
||||
encoding = 'q' if qlen - blen < 5 else 'b'
|
||||
encoded = _cte_encoders[encoding](bstring)
|
||||
if lang:
|
||||
lang = '*' + lang
|
||||
return "=?{0}{1}?{2}?{3}?=".format(charset, lang, encoding, encoded)
|
||||
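A hedged sketch of the RFC 2047 helpers above. The module is described as package-private, so this is illustrative only; the import path assumes the vendored backport is on the path:

from future.backports.email._encoded_words import decode, encode

text, charset, lang, defects = decode('=?utf-8?q?caf=C3=A9?=')
print(text, charset, defects)   # café utf-8 []

# encode() picks the shorter of the 'q' and 'b' encodings unless told otherwise.
print(encode('café'))           # =?utf-8?q?caf=C3=A9?=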
2965 pype/vendor/future/backports/email/_header_value_parser.py (vendored, new file)
File diff suppressed because it is too large.
546 pype/vendor/future/backports/email/_parseaddr.py (vendored, new file)
@@ -0,0 +1,546 @@
|
|||
# Copyright (C) 2002-2007 Python Software Foundation
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Email address parsing code.
|
||||
|
||||
Lifted directly from rfc822.py. This should eventually be rewritten.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import print_function
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import int
|
||||
|
||||
__all__ = [
|
||||
'mktime_tz',
|
||||
'parsedate',
|
||||
'parsedate_tz',
|
||||
'quote',
|
||||
]
|
||||
|
||||
import time, calendar
|
||||
|
||||
SPACE = ' '
|
||||
EMPTYSTRING = ''
|
||||
COMMASPACE = ', '
|
||||
|
||||
# Parse a date field
|
||||
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
|
||||
'aug', 'sep', 'oct', 'nov', 'dec',
|
||||
'january', 'february', 'march', 'april', 'may', 'june', 'july',
|
||||
'august', 'september', 'october', 'november', 'december']
|
||||
|
||||
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
|
||||
|
||||
# The timezone table does not include the military time zones defined
|
||||
# in RFC822, other than Z. According to RFC1123, the description in
|
||||
# RFC822 gets the signs wrong, so we can't rely on any such time
|
||||
# zones. RFC1123 recommends that numeric timezone indicators be used
|
||||
# instead of timezone names.
|
||||
|
||||
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
|
||||
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
|
||||
'EST': -500, 'EDT': -400, # Eastern
|
||||
'CST': -600, 'CDT': -500, # Central
|
||||
'MST': -700, 'MDT': -600, # Mountain
|
||||
'PST': -800, 'PDT': -700 # Pacific
|
||||
}
|
||||
|
||||
|
||||
def parsedate_tz(data):
|
||||
"""Convert a date string to a time tuple.
|
||||
|
||||
Accounts for military timezones.
|
||||
"""
|
||||
res = _parsedate_tz(data)
|
||||
if not res:
|
||||
return
|
||||
if res[9] is None:
|
||||
res[9] = 0
|
||||
return tuple(res)
|
||||
|
||||
def _parsedate_tz(data):
|
||||
"""Convert date to extended time tuple.
|
||||
|
||||
The last (additional) element is the time zone offset in seconds, except if
|
||||
the timezone was specified as -0000. In that case the last element is
|
||||
None. This indicates a UTC timestamp that explicitly declaims knowledge of
|
||||
the source timezone, as opposed to a +0000 timestamp that indicates the
|
||||
source timezone really was UTC.
|
||||
|
||||
"""
|
||||
if not data:
|
||||
return
|
||||
data = data.split()
|
||||
# The FWS after the comma after the day-of-week is optional, so search and
|
||||
# adjust for this.
|
||||
if data[0].endswith(',') or data[0].lower() in _daynames:
|
||||
# There's a dayname here. Skip it
|
||||
del data[0]
|
||||
else:
|
||||
i = data[0].rfind(',')
|
||||
if i >= 0:
|
||||
data[0] = data[0][i+1:]
|
||||
if len(data) == 3: # RFC 850 date, deprecated
|
||||
stuff = data[0].split('-')
|
||||
if len(stuff) == 3:
|
||||
data = stuff + data[1:]
|
||||
if len(data) == 4:
|
||||
s = data[3]
|
||||
i = s.find('+')
|
||||
if i == -1:
|
||||
i = s.find('-')
|
||||
if i > 0:
|
||||
data[3:] = [s[:i], s[i:]]
|
||||
else:
|
||||
data.append('') # Dummy tz
|
||||
if len(data) < 5:
|
||||
return None
|
||||
data = data[:5]
|
||||
[dd, mm, yy, tm, tz] = data
|
||||
mm = mm.lower()
|
||||
if mm not in _monthnames:
|
||||
dd, mm = mm, dd.lower()
|
||||
if mm not in _monthnames:
|
||||
return None
|
||||
mm = _monthnames.index(mm) + 1
|
||||
if mm > 12:
|
||||
mm -= 12
|
||||
if dd[-1] == ',':
|
||||
dd = dd[:-1]
|
||||
i = yy.find(':')
|
||||
if i > 0:
|
||||
yy, tm = tm, yy
|
||||
if yy[-1] == ',':
|
||||
yy = yy[:-1]
|
||||
if not yy[0].isdigit():
|
||||
yy, tz = tz, yy
|
||||
if tm[-1] == ',':
|
||||
tm = tm[:-1]
|
||||
tm = tm.split(':')
|
||||
if len(tm) == 2:
|
||||
[thh, tmm] = tm
|
||||
tss = '0'
|
||||
elif len(tm) == 3:
|
||||
[thh, tmm, tss] = tm
|
||||
elif len(tm) == 1 and '.' in tm[0]:
|
||||
# Some non-compliant MUAs use '.' to separate time elements.
|
||||
tm = tm[0].split('.')
|
||||
if len(tm) == 2:
|
||||
[thh, tmm] = tm
|
||||
tss = 0
|
||||
elif len(tm) == 3:
|
||||
[thh, tmm, tss] = tm
|
||||
else:
|
||||
return None
|
||||
try:
|
||||
yy = int(yy)
|
||||
dd = int(dd)
|
||||
thh = int(thh)
|
||||
tmm = int(tmm)
|
||||
tss = int(tss)
|
||||
except ValueError:
|
||||
return None
|
||||
# Check for a yy specified in two-digit format, then convert it to the
|
||||
# appropriate four-digit format, according to the POSIX standard. RFC 822
|
||||
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
|
||||
# mandates a 4-digit yy. For more information, see the documentation for
|
||||
# the time module.
|
||||
if yy < 100:
|
||||
# The year is between 1969 and 1999 (inclusive).
|
||||
if yy > 68:
|
||||
yy += 1900
|
||||
# The year is between 2000 and 2068 (inclusive).
|
||||
else:
|
||||
yy += 2000
|
||||
tzoffset = None
|
||||
tz = tz.upper()
|
||||
if tz in _timezones:
|
||||
tzoffset = _timezones[tz]
|
||||
else:
|
||||
try:
|
||||
tzoffset = int(tz)
|
||||
except ValueError:
|
||||
pass
|
||||
if tzoffset==0 and tz.startswith('-'):
|
||||
tzoffset = None
|
||||
# Convert a timezone offset into seconds ; -0500 -> -18000
|
||||
if tzoffset:
|
||||
if tzoffset < 0:
|
||||
tzsign = -1
|
||||
tzoffset = -tzoffset
|
||||
else:
|
||||
tzsign = 1
|
||||
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
|
||||
# Daylight Saving Time flag is set to -1, since DST is unknown.
|
||||
return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
|
||||
|
||||
|
||||
def parsedate(data):
|
||||
"""Convert a time string to a time tuple."""
|
||||
t = parsedate_tz(data)
|
||||
if isinstance(t, tuple):
|
||||
return t[:9]
|
||||
else:
|
||||
return t
|
||||
|
||||
|
||||
def mktime_tz(data):
|
||||
"""Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
|
||||
if data[9] is None:
|
||||
# No zone info, so localtime is better assumption than GMT
|
||||
return time.mktime(data[:8] + (-1,))
|
||||
else:
|
||||
t = calendar.timegm(data)
|
||||
return t - data[9]
|
||||
|
||||
|
||||
def quote(str):
|
||||
"""Prepare string to be used in a quoted string.
|
||||
|
||||
Turns backslash and double quote characters into quoted pairs. These
|
||||
are the only characters that need to be quoted inside a quoted string.
|
||||
Does not add the surrounding double quotes.
|
||||
"""
|
||||
return str.replace('\\', '\\\\').replace('"', '\\"')
|
||||
|
||||
|
||||
class AddrlistClass(object):
|
||||
"""Address parser class by Ben Escoto.
|
||||
|
||||
To understand what this class does, it helps to have a copy of RFC 2822 in
|
||||
front of you.
|
||||
|
||||
Note: this class interface is deprecated and may be removed in the future.
|
||||
Use email.utils.AddressList instead.
|
||||
"""
|
||||
|
||||
def __init__(self, field):
|
||||
"""Initialize a new instance.
|
||||
|
||||
`field' is an unparsed address header field, containing
|
||||
one or more addresses.
|
||||
"""
|
||||
self.specials = '()<>@,:;.\"[]'
|
||||
self.pos = 0
|
||||
self.LWS = ' \t'
|
||||
self.CR = '\r\n'
|
||||
self.FWS = self.LWS + self.CR
|
||||
self.atomends = self.specials + self.LWS + self.CR
|
||||
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
|
||||
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
|
||||
# syntax, so allow dots in phrases.
|
||||
self.phraseends = self.atomends.replace('.', '')
|
||||
self.field = field
|
||||
self.commentlist = []
|
||||
|
||||
def gotonext(self):
|
||||
"""Skip white space and extract comments."""
|
||||
wslist = []
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.LWS + '\n\r':
|
||||
if self.field[self.pos] not in '\n\r':
|
||||
wslist.append(self.field[self.pos])
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
else:
|
||||
break
|
||||
return EMPTYSTRING.join(wslist)
|
||||
|
||||
def getaddrlist(self):
|
||||
"""Parse all addresses.
|
||||
|
||||
Returns a list containing all of the addresses.
|
||||
"""
|
||||
result = []
|
||||
while self.pos < len(self.field):
|
||||
ad = self.getaddress()
|
||||
if ad:
|
||||
result += ad
|
||||
else:
|
||||
result.append(('', ''))
|
||||
return result
|
||||
|
||||
def getaddress(self):
|
||||
"""Parse the next address."""
|
||||
self.commentlist = []
|
||||
self.gotonext()
|
||||
|
||||
oldpos = self.pos
|
||||
oldcl = self.commentlist
|
||||
plist = self.getphraselist()
|
||||
|
||||
self.gotonext()
|
||||
returnlist = []
|
||||
|
||||
if self.pos >= len(self.field):
|
||||
# Bad email address technically, no domain.
|
||||
if plist:
|
||||
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||
|
||||
elif self.field[self.pos] in '.@':
|
||||
# email address is just an addrspec
|
||||
# this isn't very efficient since we start over
|
||||
self.pos = oldpos
|
||||
self.commentlist = oldcl
|
||||
addrspec = self.getaddrspec()
|
||||
returnlist = [(SPACE.join(self.commentlist), addrspec)]
|
||||
|
||||
elif self.field[self.pos] == ':':
|
||||
# address is a group
|
||||
returnlist = []
|
||||
|
||||
fieldlen = len(self.field)
|
||||
self.pos += 1
|
||||
while self.pos < len(self.field):
|
||||
self.gotonext()
|
||||
if self.pos < fieldlen and self.field[self.pos] == ';':
|
||||
self.pos += 1
|
||||
break
|
||||
returnlist = returnlist + self.getaddress()
|
||||
|
||||
elif self.field[self.pos] == '<':
|
||||
# Address is a phrase then a route addr
|
||||
routeaddr = self.getrouteaddr()
|
||||
|
||||
if self.commentlist:
|
||||
returnlist = [(SPACE.join(plist) + ' (' +
|
||||
' '.join(self.commentlist) + ')', routeaddr)]
|
||||
else:
|
||||
returnlist = [(SPACE.join(plist), routeaddr)]
|
||||
|
||||
else:
|
||||
if plist:
|
||||
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||
elif self.field[self.pos] in self.specials:
|
||||
self.pos += 1
|
||||
|
||||
self.gotonext()
|
||||
if self.pos < len(self.field) and self.field[self.pos] == ',':
|
||||
self.pos += 1
|
||||
return returnlist
|
||||
|
||||
def getrouteaddr(self):
|
||||
"""Parse a route address (Return-path value).
|
||||
|
||||
This method just skips all the route stuff and returns the addrspec.
|
||||
"""
|
||||
if self.field[self.pos] != '<':
|
||||
return
|
||||
|
||||
expectroute = False
|
||||
self.pos += 1
|
||||
self.gotonext()
|
||||
adlist = ''
|
||||
while self.pos < len(self.field):
|
||||
if expectroute:
|
||||
self.getdomain()
|
||||
expectroute = False
|
||||
elif self.field[self.pos] == '>':
|
||||
self.pos += 1
|
||||
break
|
||||
elif self.field[self.pos] == '@':
|
||||
self.pos += 1
|
||||
expectroute = True
|
||||
elif self.field[self.pos] == ':':
|
||||
self.pos += 1
|
||||
else:
|
||||
adlist = self.getaddrspec()
|
||||
self.pos += 1
|
||||
break
|
||||
self.gotonext()
|
||||
|
||||
return adlist
|
||||
|
||||
def getaddrspec(self):
|
||||
"""Parse an RFC 2822 addr-spec."""
|
||||
aslist = []
|
||||
|
||||
self.gotonext()
|
||||
while self.pos < len(self.field):
|
||||
preserve_ws = True
|
||||
if self.field[self.pos] == '.':
|
||||
if aslist and not aslist[-1].strip():
|
||||
aslist.pop()
|
||||
aslist.append('.')
|
||||
self.pos += 1
|
||||
preserve_ws = False
|
||||
elif self.field[self.pos] == '"':
|
||||
aslist.append('"%s"' % quote(self.getquote()))
|
||||
elif self.field[self.pos] in self.atomends:
|
||||
if aslist and not aslist[-1].strip():
|
||||
aslist.pop()
|
||||
break
|
||||
else:
|
||||
aslist.append(self.getatom())
|
||||
ws = self.gotonext()
|
||||
if preserve_ws and ws:
|
||||
aslist.append(ws)
|
||||
|
||||
if self.pos >= len(self.field) or self.field[self.pos] != '@':
|
||||
return EMPTYSTRING.join(aslist)
|
||||
|
||||
aslist.append('@')
|
||||
self.pos += 1
|
||||
self.gotonext()
|
||||
return EMPTYSTRING.join(aslist) + self.getdomain()
|
||||
|
||||
def getdomain(self):
|
||||
"""Get the complete domain name from an address."""
|
||||
sdlist = []
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.LWS:
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
elif self.field[self.pos] == '[':
|
||||
sdlist.append(self.getdomainliteral())
|
||||
elif self.field[self.pos] == '.':
|
||||
self.pos += 1
|
||||
sdlist.append('.')
|
||||
elif self.field[self.pos] in self.atomends:
|
||||
break
|
||||
else:
|
||||
sdlist.append(self.getatom())
|
||||
return EMPTYSTRING.join(sdlist)
|
||||
|
||||
def getdelimited(self, beginchar, endchars, allowcomments=True):
|
||||
"""Parse a header fragment delimited by special characters.
|
||||
|
||||
`beginchar' is the start character for the fragment.
|
||||
If self is not looking at an instance of `beginchar' then
|
||||
getdelimited returns the empty string.
|
||||
|
||||
`endchars' is a sequence of allowable end-delimiting characters.
|
||||
Parsing stops when one of these is encountered.
|
||||
|
||||
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
|
||||
within the parsed fragment.
|
||||
"""
|
||||
if self.field[self.pos] != beginchar:
|
||||
return ''
|
||||
|
||||
slist = ['']
|
||||
quote = False
|
||||
self.pos += 1
|
||||
while self.pos < len(self.field):
|
||||
if quote:
|
||||
slist.append(self.field[self.pos])
|
||||
quote = False
|
||||
elif self.field[self.pos] in endchars:
|
||||
self.pos += 1
|
||||
break
|
||||
elif allowcomments and self.field[self.pos] == '(':
|
||||
slist.append(self.getcomment())
|
||||
continue # have already advanced pos from getcomment
|
||||
elif self.field[self.pos] == '\\':
|
||||
quote = True
|
||||
else:
|
||||
slist.append(self.field[self.pos])
|
||||
self.pos += 1
|
||||
|
||||
return EMPTYSTRING.join(slist)
|
||||
|
||||
def getquote(self):
|
||||
"""Get a quote-delimited fragment from self's field."""
|
||||
return self.getdelimited('"', '"\r', False)
|
||||
|
||||
def getcomment(self):
|
||||
"""Get a parenthesis-delimited fragment from self's field."""
|
||||
return self.getdelimited('(', ')\r', True)
|
||||
|
||||
def getdomainliteral(self):
|
||||
"""Parse an RFC 2822 domain-literal."""
|
||||
return '[%s]' % self.getdelimited('[', ']\r', False)
|
||||
|
||||
def getatom(self, atomends=None):
|
||||
"""Parse an RFC 2822 atom.
|
||||
|
||||
Optional atomends specifies a different set of end token delimiters
|
||||
(the default is to use self.atomends). This is used e.g. in
|
||||
getphraselist() since phrase endings must not include the `.' (which
|
||||
is legal in phrases)."""
|
||||
atomlist = ['']
|
||||
if atomends is None:
|
||||
atomends = self.atomends
|
||||
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in atomends:
|
||||
break
|
||||
else:
|
||||
atomlist.append(self.field[self.pos])
|
||||
self.pos += 1
|
||||
|
||||
return EMPTYSTRING.join(atomlist)
|
||||
|
||||
def getphraselist(self):
|
||||
"""Parse a sequence of RFC 2822 phrases.
|
||||
|
||||
A phrase is a sequence of words, which are in turn either RFC 2822
|
||||
atoms or quoted-strings. Phrases are canonicalized by squeezing all
|
||||
runs of continuous whitespace into one space.
|
||||
"""
|
||||
plist = []
|
||||
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.FWS:
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '"':
|
||||
plist.append(self.getquote())
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
elif self.field[self.pos] in self.phraseends:
|
||||
break
|
||||
else:
|
||||
plist.append(self.getatom(self.phraseends))
|
||||
|
||||
return plist
|
||||
|
||||
class AddressList(AddrlistClass):
|
||||
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
|
||||
def __init__(self, field):
|
||||
AddrlistClass.__init__(self, field)
|
||||
if field:
|
||||
self.addresslist = self.getaddrlist()
|
||||
else:
|
||||
self.addresslist = []
|
||||
|
||||
def __len__(self):
|
||||
return len(self.addresslist)
|
||||
|
||||
def __add__(self, other):
|
||||
# Set union
|
||||
newaddr = AddressList(None)
|
||||
newaddr.addresslist = self.addresslist[:]
|
||||
for x in other.addresslist:
|
||||
if not x in self.addresslist:
|
||||
newaddr.addresslist.append(x)
|
||||
return newaddr
|
||||
|
||||
def __iadd__(self, other):
|
||||
# Set union, in-place
|
||||
for x in other.addresslist:
|
||||
if not x in self.addresslist:
|
||||
self.addresslist.append(x)
|
||||
return self
|
||||
|
||||
def __sub__(self, other):
|
||||
# Set difference
|
||||
newaddr = AddressList(None)
|
||||
for x in self.addresslist:
|
||||
if not x in other.addresslist:
|
||||
newaddr.addresslist.append(x)
|
||||
return newaddr
|
||||
|
||||
def __isub__(self, other):
|
||||
# Set difference, in-place
|
||||
for x in other.addresslist:
|
||||
if x in self.addresslist:
|
||||
self.addresslist.remove(x)
|
||||
return self
|
||||
|
||||
def __getitem__(self, index):
|
||||
# Make indexing, slices, and 'in' work
|
||||
return self.addresslist[index]
|
||||
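Not part of the commit — a minimal usage sketch of what the address-parsing machinery above (AddrlistClass / AddressList) is for. It uses the standard-library front end email.utils, which wraps the same parser logic; the sample addresses are made up.

# Illustration only: parseaddr()/getaddresses() are thin wrappers over
# the AddrlistClass/AddressList classes shown above.
from email.utils import getaddresses, parseaddr

# parseaddr() returns a single (display-name, addr-spec) pair.
print(parseaddr('Jane Doe <jane@example.com>'))
# ('Jane Doe', 'jane@example.com')

# getaddresses() flattens a list of header values into pairs,
# handling commas, comments and quoted strings along the way.
headers = ['Jane Doe <jane@example.com>, (comment) bob@example.org']
print(getaddresses(headers))
# [('Jane Doe', 'jane@example.com'), ('', 'bob@example.org')]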
365
pype/vendor/future/backports/email/_policybase.py
vendored
Normal file
@ -0,0 +1,365 @@
"""Policy framework for the email package.
|
||||
|
||||
Allows fine grained feature control of how the package parses and emits data.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import print_function
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import super
|
||||
from future.builtins import str
|
||||
from future.utils import with_metaclass
|
||||
|
||||
import abc
|
||||
from future.backports.email import header
|
||||
from future.backports.email import charset as _charset
|
||||
from future.backports.email.utils import _has_surrogates
|
||||
|
||||
__all__ = [
|
||||
'Policy',
|
||||
'Compat32',
|
||||
'compat32',
|
||||
]
|
||||
|
||||
|
||||
class _PolicyBase(object):
|
||||
|
||||
"""Policy Object basic framework.
|
||||
|
||||
This class is useless unless subclassed. A subclass should define
|
||||
class attributes with defaults for any values that are to be
|
||||
managed by the Policy object. The constructor will then allow
|
||||
non-default values to be set for these attributes at instance
|
||||
creation time. The instance will be callable, taking these same
|
||||
attributes keyword arguments, and returning a new instance
|
||||
identical to the called instance except for those values changed
|
||||
by the keyword arguments. Instances may be added, yielding new
|
||||
instances with any non-default values from the right hand
|
||||
operand overriding those in the left hand operand. That is,
|
||||
|
||||
A + B == A(<non-default values of B>)
|
||||
|
||||
The repr of an instance can be used to reconstruct the object
|
||||
if and only if the repr of the values can be used to reconstruct
|
||||
those values.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, **kw):
|
||||
"""Create new Policy, possibly overriding some defaults.
|
||||
|
||||
See class docstring for a list of overridable attributes.
|
||||
|
||||
"""
|
||||
for name, value in kw.items():
|
||||
if hasattr(self, name):
|
||||
super(_PolicyBase,self).__setattr__(name, value)
|
||||
else:
|
||||
raise TypeError(
|
||||
"{!r} is an invalid keyword argument for {}".format(
|
||||
name, self.__class__.__name__))
|
||||
|
||||
def __repr__(self):
|
||||
args = [ "{}={!r}".format(name, value)
|
||||
for name, value in self.__dict__.items() ]
|
||||
return "{}({})".format(self.__class__.__name__, ', '.join(args))
|
||||
|
||||
def clone(self, **kw):
|
||||
"""Return a new instance with specified attributes changed.
|
||||
|
||||
The new instance has the same attribute values as the current object,
|
||||
except for the changes passed in as keyword arguments.
|
||||
|
||||
"""
|
||||
newpolicy = self.__class__.__new__(self.__class__)
|
||||
for attr, value in self.__dict__.items():
|
||||
object.__setattr__(newpolicy, attr, value)
|
||||
for attr, value in kw.items():
|
||||
if not hasattr(self, attr):
|
||||
raise TypeError(
|
||||
"{!r} is an invalid keyword argument for {}".format(
|
||||
attr, self.__class__.__name__))
|
||||
object.__setattr__(newpolicy, attr, value)
|
||||
return newpolicy
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if hasattr(self, name):
|
||||
msg = "{!r} object attribute {!r} is read-only"
|
||||
else:
|
||||
msg = "{!r} object has no attribute {!r}"
|
||||
raise AttributeError(msg.format(self.__class__.__name__, name))
|
||||
|
||||
def __add__(self, other):
|
||||
"""Non-default values from right operand override those from left.
|
||||
|
||||
The object returned is a new instance of the subclass.
|
||||
|
||||
"""
|
||||
return self.clone(**other.__dict__)
|
||||
|
||||
|
||||
def _append_doc(doc, added_doc):
|
||||
doc = doc.rsplit('\n', 1)[0]
|
||||
added_doc = added_doc.split('\n', 1)[1]
|
||||
return doc + '\n' + added_doc
|
||||
|
||||
def _extend_docstrings(cls):
|
||||
if cls.__doc__ and cls.__doc__.startswith('+'):
|
||||
cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
|
||||
for name, attr in cls.__dict__.items():
|
||||
if attr.__doc__ and attr.__doc__.startswith('+'):
|
||||
for c in (c for base in cls.__bases__ for c in base.mro()):
|
||||
doc = getattr(getattr(c, name), '__doc__')
|
||||
if doc:
|
||||
attr.__doc__ = _append_doc(doc, attr.__doc__)
|
||||
break
|
||||
return cls
|
||||
|
||||
|
||||
class Policy(with_metaclass(abc.ABCMeta, _PolicyBase)):
|
||||
|
||||
r"""Controls for how messages are interpreted and formatted.
|
||||
|
||||
Most of the classes and many of the methods in the email package accept
|
||||
Policy objects as parameters. A Policy object contains a set of values and
|
||||
functions that control how input is interpreted and how output is rendered.
|
||||
For example, the parameter 'raise_on_defect' controls whether or not an RFC
|
||||
violation results in an error being raised or not, while 'max_line_length'
|
||||
controls the maximum length of output lines when a Message is serialized.
|
||||
|
||||
Any valid attribute may be overridden when a Policy is created by passing
|
||||
it as a keyword argument to the constructor. Policy objects are immutable,
|
||||
but a new Policy object can be created with only certain values changed by
|
||||
calling the Policy instance with keyword arguments. Policy objects can
|
||||
also be added, producing a new Policy object in which the non-default
|
||||
attributes set in the right hand operand overwrite those specified in the
|
||||
left operand.
|
||||
|
||||
Settable attributes:
|
||||
|
||||
raise_on_defect -- If true, then defects should be raised as errors.
|
||||
Default: False.
|
||||
|
||||
linesep -- string containing the value to use as separation
|
||||
between output lines. Default '\n'.
|
||||
|
||||
cte_type -- Type of allowed content transfer encodings
|
||||
|
||||
7bit -- ASCII only
|
||||
8bit -- Content-Transfer-Encoding: 8bit is allowed
|
||||
|
||||
Default: 8bit. Also controls the disposition of
|
||||
(RFC invalid) binary data in headers; see the
|
||||
documentation of the binary_fold method.
|
||||
|
||||
max_line_length -- maximum length of lines, excluding 'linesep',
|
||||
during serialization. None or 0 means no line
|
||||
wrapping is done. Default is 78.
|
||||
|
||||
"""
|
||||
|
||||
raise_on_defect = False
|
||||
linesep = '\n'
|
||||
cte_type = '8bit'
|
||||
max_line_length = 78
|
||||
|
||||
def handle_defect(self, obj, defect):
|
||||
"""Based on policy, either raise defect or call register_defect.
|
||||
|
||||
handle_defect(obj, defect)
|
||||
|
||||
defect should be a Defect subclass, but in any case must be an
|
||||
Exception subclass. obj is the object on which the defect should be
|
||||
registered if it is not raised. If the raise_on_defect is True, the
|
||||
defect is raised as an error, otherwise the object and the defect are
|
||||
passed to register_defect.
|
||||
|
||||
This method is intended to be called by parsers that discover defects.
|
||||
The email package parsers always call it with Defect instances.
|
||||
|
||||
"""
|
||||
if self.raise_on_defect:
|
||||
raise defect
|
||||
self.register_defect(obj, defect)
|
||||
|
||||
def register_defect(self, obj, defect):
|
||||
"""Record 'defect' on 'obj'.
|
||||
|
||||
Called by handle_defect if raise_on_defect is False. This method is
|
||||
part of the Policy API so that Policy subclasses can implement custom
|
||||
defect handling. The default implementation calls the append method of
|
||||
the defects attribute of obj. The objects used by the email package by
|
||||
default that get passed to this method will always have a defects
|
||||
attribute with an append method.
|
||||
|
||||
"""
|
||||
obj.defects.append(defect)
|
||||
|
||||
def header_max_count(self, name):
|
||||
"""Return the maximum allowed number of headers named 'name'.
|
||||
|
||||
Called when a header is added to a Message object. If the returned
|
||||
value is not 0 or None, and there are already a number of headers with
|
||||
the name 'name' equal to the value returned, a ValueError is raised.
|
||||
|
||||
Because the default behavior of Message's __setitem__ is to append the
|
||||
value to the list of headers, it is easy to create duplicate headers
|
||||
without realizing it. This method allows certain headers to be limited
|
||||
in the number of instances of that header that may be added to a
|
||||
Message programmatically. (The limit is not observed by the parser,
|
||||
which will faithfully produce as many headers as exist in the message
|
||||
being parsed.)
|
||||
|
||||
The default implementation returns None for all header names.
|
||||
"""
|
||||
return None
|
||||
|
||||
@abc.abstractmethod
|
||||
def header_source_parse(self, sourcelines):
|
||||
"""Given a list of linesep terminated strings constituting the lines of
|
||||
a single header, return the (name, value) tuple that should be stored
|
||||
in the model. The input lines should retain their terminating linesep
|
||||
characters. The lines passed in by the email package may contain
|
||||
surrogateescaped binary data.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def header_store_parse(self, name, value):
|
||||
"""Given the header name and the value provided by the application
|
||||
program, return the (name, value) that should be stored in the model.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def header_fetch_parse(self, name, value):
|
||||
"""Given the header name and the value from the model, return the value
|
||||
to be returned to the application program that is requesting that
|
||||
header. The value passed in by the email package may contain
|
||||
surrogateescaped binary data if the lines were parsed by a BytesParser.
|
||||
The returned value should not contain any surrogateescaped data.
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def fold(self, name, value):
|
||||
"""Given the header name and the value from the model, return a string
|
||||
containing linesep characters that implement the folding of the header
|
||||
according to the policy controls. The value passed in by the email
|
||||
package may contain surrogateescaped binary data if the lines were
|
||||
parsed by a BytesParser. The returned value should not contain any
|
||||
surrogateescaped data.
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def fold_binary(self, name, value):
|
||||
"""Given the header name and the value from the model, return binary
|
||||
data containing linesep characters that implement the folding of the
|
||||
header according to the policy controls. The value passed in by the
|
||||
email package may contain surrogateescaped binary data.
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@_extend_docstrings
|
||||
class Compat32(Policy):
|
||||
|
||||
"""+
|
||||
This particular policy is the backward compatibility Policy. It
|
||||
replicates the behavior of the email package version 5.1.
|
||||
"""
|
||||
|
||||
def _sanitize_header(self, name, value):
|
||||
# If the header value contains surrogates, return a Header using
|
||||
# the unknown-8bit charset to encode the bytes as encoded words.
|
||||
if not isinstance(value, str):
|
||||
# Assume it is already a header object
|
||||
return value
|
||||
if _has_surrogates(value):
|
||||
return header.Header(value, charset=_charset.UNKNOWN8BIT,
|
||||
header_name=name)
|
||||
else:
|
||||
return value
|
||||
|
||||
def header_source_parse(self, sourcelines):
|
||||
"""+
|
||||
The name is parsed as everything up to the ':' and returned unmodified.
|
||||
The value is determined by stripping leading whitespace off the
|
||||
remainder of the first line, joining all subsequent lines together, and
|
||||
stripping any trailing carriage return or linefeed characters.
|
||||
|
||||
"""
|
||||
name, value = sourcelines[0].split(':', 1)
|
||||
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
|
||||
return (name, value.rstrip('\r\n'))
|
||||
|
||||
def header_store_parse(self, name, value):
|
||||
"""+
|
||||
The name and value are returned unmodified.
|
||||
"""
|
||||
return (name, value)
|
||||
|
||||
def header_fetch_parse(self, name, value):
|
||||
"""+
|
||||
If the value contains binary data, it is converted into a Header object
|
||||
using the unknown-8bit charset. Otherwise it is returned unmodified.
|
||||
"""
|
||||
return self._sanitize_header(name, value)
|
||||
|
||||
def fold(self, name, value):
|
||||
"""+
|
||||
Headers are folded using the Header folding algorithm, which preserves
|
||||
existing line breaks in the value, and wraps each resulting line to the
|
||||
max_line_length. Non-ASCII binary data are CTE encoded using the
|
||||
unknown-8bit charset.
|
||||
|
||||
"""
|
||||
return self._fold(name, value, sanitize=True)
|
||||
|
||||
def fold_binary(self, name, value):
|
||||
"""+
|
||||
Headers are folded using the Header folding algorithm, which preserves
|
||||
existing line breaks in the value, and wraps each resulting line to the
|
||||
max_line_length. If cte_type is 7bit, non-ascii binary data is CTE
|
||||
encoded using the unknown-8bit charset. Otherwise the original source
|
||||
header is used, with its existing line breaks and/or binary data.
|
||||
|
||||
"""
|
||||
folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
|
||||
return folded.encode('ascii', 'surrogateescape')
|
||||
|
||||
def _fold(self, name, value, sanitize):
|
||||
parts = []
|
||||
parts.append('%s: ' % name)
|
||||
if isinstance(value, str):
|
||||
if _has_surrogates(value):
|
||||
if sanitize:
|
||||
h = header.Header(value,
|
||||
charset=_charset.UNKNOWN8BIT,
|
||||
header_name=name)
|
||||
else:
|
||||
# If we have raw 8bit data in a byte string, we have no idea
|
||||
# what the encoding is. There is no safe way to split this
|
||||
# string. If it's ascii-subset, then we could do a normal
|
||||
# ascii split, but if it's multibyte then we could break the
|
||||
# string. There's no way to know so the least harm seems to
|
||||
# be to not split the string and risk it being too long.
|
||||
parts.append(value)
|
||||
h = None
|
||||
else:
|
||||
h = header.Header(value, header_name=name)
|
||||
else:
|
||||
# Assume it is a Header-like object.
|
||||
h = value
|
||||
if h is not None:
|
||||
parts.append(h.encode(linesep=self.linesep,
|
||||
maxlinelen=self.max_line_length))
|
||||
parts.append(self.linesep)
|
||||
return ''.join(parts)
|
||||
|
||||
|
||||
compat32 = Compat32()
|
||||
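Not part of the commit — a short sketch of how the Policy framework in the file above composes. It uses the standard-library email.policy module rather than the vendored pype.vendor path, on the assumption that both expose the same clone()/+ behaviour.

# Illustration only: policies are immutable; clone() and + build new ones.
from email import policy

strict = policy.compat32.clone(raise_on_defect=True)
print(strict.raise_on_defect)            # True
print(policy.compat32.raise_on_defect)   # False -- the original is unchanged

# Adding policies: non-default values of the right operand win.
combined = policy.compat32 + strict
print(combined.raise_on_defect)          # True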
120
pype/vendor/future/backports/email/base64mime.py
vendored
Normal file
@ -0,0 +1,120 @@
# Copyright (C) 2002-2007 Python Software Foundation
|
||||
# Author: Ben Gertzfield
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Base64 content transfer encoding per RFCs 2045-2047.
|
||||
|
||||
This module handles the content transfer encoding method defined in RFC 2045
|
||||
to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
|
||||
characters encoding known as Base64.
|
||||
|
||||
It is used in the MIME standards for email to attach images, audio, and text
|
||||
using some 8-bit character sets to messages.
|
||||
|
||||
This module provides an interface to encode and decode both headers and bodies
|
||||
with Base64 encoding.
|
||||
|
||||
RFC 2045 defines a method for including character set information in an
|
||||
`encoded-word' in a header. This method is commonly used for 8-bit real names
|
||||
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
|
||||
|
||||
This module does not do the line wrapping or end-of-line character conversion
|
||||
necessary for proper internationalized headers; it only does dumb encoding and
|
||||
decoding. To deal with the various line wrapping issues, use the email.header
|
||||
module.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import range
|
||||
from future.builtins import bytes
|
||||
|
||||
__all__ = [
|
||||
'body_decode',
|
||||
'body_encode',
|
||||
'decode',
|
||||
'decodestring',
|
||||
'header_encode',
|
||||
'header_length',
|
||||
]
|
||||
|
||||
|
||||
from base64 import b64encode
|
||||
from binascii import b2a_base64, a2b_base64
|
||||
|
||||
CRLF = '\r\n'
|
||||
NL = '\n'
|
||||
EMPTYSTRING = ''
|
||||
|
||||
# See also Charset.py
|
||||
MISC_LEN = 7
|
||||
|
||||
|
||||
# Helpers
|
||||
def header_length(bytearray):
|
||||
"""Return the length of s when it is encoded with base64."""
|
||||
groups_of_3, leftover = divmod(len(bytearray), 3)
|
||||
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
|
||||
n = groups_of_3 * 4
|
||||
if leftover:
|
||||
n += 4
|
||||
return n
|
||||
|
||||
|
||||
def header_encode(header_bytes, charset='iso-8859-1'):
|
||||
"""Encode a single header line with Base64 encoding in a given charset.
|
||||
|
||||
charset names the character set to use to encode the header. It defaults
|
||||
to iso-8859-1. Base64 encoding is defined in RFC 2045.
|
||||
"""
|
||||
if not header_bytes:
|
||||
return ""
|
||||
if isinstance(header_bytes, str):
|
||||
header_bytes = header_bytes.encode(charset)
|
||||
encoded = b64encode(header_bytes).decode("ascii")
|
||||
return '=?%s?b?%s?=' % (charset, encoded)
|
||||
|
||||
|
||||
def body_encode(s, maxlinelen=76, eol=NL):
|
||||
r"""Encode a string with base64.
|
||||
|
||||
Each line will be wrapped at, at most, maxlinelen characters (defaults to
|
||||
76 characters).
|
||||
|
||||
Each line of encoded text will end with eol, which defaults to "\n". Set
|
||||
this to "\r\n" if you will be using the result of this function directly
|
||||
in an email.
|
||||
"""
|
||||
if not s:
|
||||
return s
|
||||
|
||||
encvec = []
|
||||
max_unencoded = maxlinelen * 3 // 4
|
||||
for i in range(0, len(s), max_unencoded):
|
||||
# BAW: should encode() inherit b2a_base64()'s dubious behavior in
|
||||
# adding a newline to the encoded string?
|
||||
enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
|
||||
if enc.endswith(NL) and eol != NL:
|
||||
enc = enc[:-1] + eol
|
||||
encvec.append(enc)
|
||||
return EMPTYSTRING.join(encvec)
|
||||
|
||||
|
||||
def decode(string):
|
||||
"""Decode a raw base64 string, returning a bytes object.
|
||||
|
||||
This function does not parse a full MIME header value encoded with
|
||||
base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
|
||||
level email.header class for that functionality.
|
||||
"""
|
||||
if not string:
|
||||
return bytes()
|
||||
elif isinstance(string, str):
|
||||
return a2b_base64(string.encode('raw-unicode-escape'))
|
||||
else:
|
||||
return a2b_base64(string)
|
||||
|
||||
|
||||
# For convenience and backwards compatibility w/ standard base64 module
|
||||
body_decode = decode
|
||||
decodestring = decode
|
||||
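Not part of the commit — a minimal sketch exercising the base64mime helpers above via the standard-library copy (email.base64mime exposes the same functions); the payloads are made up.

# Illustration only: header_encode / body_encode / decode round trip.
from email import base64mime

print(base64mime.header_encode(b'Hello W\xf6rld', charset='iso-8859-1'))
# '=?iso-8859-1?b?SGVsbG8gV/ZybGQ=?='

encoded = base64mime.body_encode(b'some binary\x00payload', maxlinelen=76)
print(encoded)                     # base64 text, wrapped at 76 characters

print(base64mime.decode(encoded))  # round-trips back to the original bytes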
409
pype/vendor/future/backports/email/charset.py
vendored
Normal file
@ -0,0 +1,409 @@
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import str
|
||||
from future.builtins import next
|
||||
|
||||
# Copyright (C) 2001-2007 Python Software Foundation
|
||||
# Author: Ben Gertzfield, Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
__all__ = [
|
||||
'Charset',
|
||||
'add_alias',
|
||||
'add_charset',
|
||||
'add_codec',
|
||||
]
|
||||
|
||||
from functools import partial
|
||||
|
||||
from future.backports import email
|
||||
from future.backports.email import errors
|
||||
from future.backports.email.encoders import encode_7or8bit
|
||||
|
||||
|
||||
# Flags for types of header encodings
|
||||
QP = 1 # Quoted-Printable
|
||||
BASE64 = 2 # Base64
|
||||
SHORTEST = 3 # the shorter of QP and base64, but only for headers
|
||||
|
||||
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
|
||||
RFC2047_CHROME_LEN = 7
|
||||
|
||||
DEFAULT_CHARSET = 'us-ascii'
|
||||
UNKNOWN8BIT = 'unknown-8bit'
|
||||
EMPTYSTRING = ''
|
||||
|
||||
|
||||
# Defaults
|
||||
CHARSETS = {
|
||||
# input header enc body enc output conv
|
||||
'iso-8859-1': (QP, QP, None),
|
||||
'iso-8859-2': (QP, QP, None),
|
||||
'iso-8859-3': (QP, QP, None),
|
||||
'iso-8859-4': (QP, QP, None),
|
||||
# iso-8859-5 is Cyrillic, and not especially used
|
||||
# iso-8859-6 is Arabic, also not particularly used
|
||||
# iso-8859-7 is Greek, QP will not make it readable
|
||||
# iso-8859-8 is Hebrew, QP will not make it readable
|
||||
'iso-8859-9': (QP, QP, None),
|
||||
'iso-8859-10': (QP, QP, None),
|
||||
# iso-8859-11 is Thai, QP will not make it readable
|
||||
'iso-8859-13': (QP, QP, None),
|
||||
'iso-8859-14': (QP, QP, None),
|
||||
'iso-8859-15': (QP, QP, None),
|
||||
'iso-8859-16': (QP, QP, None),
|
||||
'windows-1252':(QP, QP, None),
|
||||
'viscii': (QP, QP, None),
|
||||
'us-ascii': (None, None, None),
|
||||
'big5': (BASE64, BASE64, None),
|
||||
'gb2312': (BASE64, BASE64, None),
|
||||
'euc-jp': (BASE64, None, 'iso-2022-jp'),
|
||||
'shift_jis': (BASE64, None, 'iso-2022-jp'),
|
||||
'iso-2022-jp': (BASE64, None, None),
|
||||
'koi8-r': (BASE64, BASE64, None),
|
||||
'utf-8': (SHORTEST, BASE64, 'utf-8'),
|
||||
}
|
||||
|
||||
# Aliases for other commonly-used names for character sets. Map
|
||||
# them to the real ones used in email.
|
||||
ALIASES = {
|
||||
'latin_1': 'iso-8859-1',
|
||||
'latin-1': 'iso-8859-1',
|
||||
'latin_2': 'iso-8859-2',
|
||||
'latin-2': 'iso-8859-2',
|
||||
'latin_3': 'iso-8859-3',
|
||||
'latin-3': 'iso-8859-3',
|
||||
'latin_4': 'iso-8859-4',
|
||||
'latin-4': 'iso-8859-4',
|
||||
'latin_5': 'iso-8859-9',
|
||||
'latin-5': 'iso-8859-9',
|
||||
'latin_6': 'iso-8859-10',
|
||||
'latin-6': 'iso-8859-10',
|
||||
'latin_7': 'iso-8859-13',
|
||||
'latin-7': 'iso-8859-13',
|
||||
'latin_8': 'iso-8859-14',
|
||||
'latin-8': 'iso-8859-14',
|
||||
'latin_9': 'iso-8859-15',
|
||||
'latin-9': 'iso-8859-15',
|
||||
'latin_10':'iso-8859-16',
|
||||
'latin-10':'iso-8859-16',
|
||||
'cp949': 'ks_c_5601-1987',
|
||||
'euc_jp': 'euc-jp',
|
||||
'euc_kr': 'euc-kr',
|
||||
'ascii': 'us-ascii',
|
||||
}
|
||||
|
||||
|
||||
# Map charsets to their Unicode codec strings.
|
||||
CODEC_MAP = {
|
||||
'gb2312': 'eucgb2312_cn',
|
||||
'big5': 'big5_tw',
|
||||
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
|
||||
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
|
||||
# Let that stuff pass through without conversion to/from Unicode.
|
||||
'us-ascii': None,
|
||||
}
|
||||
|
||||
|
||||
# Convenience functions for extending the above mappings
|
||||
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
|
||||
"""Add character set properties to the global registry.
|
||||
|
||||
charset is the input character set, and must be the canonical name of a
|
||||
character set.
|
||||
|
||||
Optional header_enc and body_enc is either Charset.QP for
|
||||
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
|
||||
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
|
||||
is only valid for header_enc. It describes how message headers and
|
||||
message bodies in the input charset are to be encoded. Default is no
|
||||
encoding.
|
||||
|
||||
Optional output_charset is the character set that the output should be
|
||||
in. Conversions will proceed from input charset, to Unicode, to the
|
||||
output charset when the method Charset.convert() is called. The default
|
||||
is to output in the same character set as the input.
|
||||
|
||||
Both input_charset and output_charset must have Unicode codec entries in
|
||||
the module's charset-to-codec mapping; use add_codec(charset, codecname)
|
||||
to add codecs the module does not know about. See the codecs module's
|
||||
documentation for more information.
|
||||
"""
|
||||
if body_enc == SHORTEST:
|
||||
raise ValueError('SHORTEST not allowed for body_enc')
|
||||
CHARSETS[charset] = (header_enc, body_enc, output_charset)
|
||||
|
||||
|
||||
def add_alias(alias, canonical):
|
||||
"""Add a character set alias.
|
||||
|
||||
alias is the alias name, e.g. latin-1
|
||||
canonical is the character set's canonical name, e.g. iso-8859-1
|
||||
"""
|
||||
ALIASES[alias] = canonical
|
||||
|
||||
|
||||
def add_codec(charset, codecname):
|
||||
"""Add a codec that map characters in the given charset to/from Unicode.
|
||||
|
||||
charset is the canonical name of a character set. codecname is the name
|
||||
of a Python codec, as appropriate for the second argument to the unicode()
|
||||
built-in, or to the encode() method of a Unicode string.
|
||||
"""
|
||||
CODEC_MAP[charset] = codecname
|
||||
|
||||
|
||||
# Convenience function for encoding strings, taking into account
|
||||
# that they might be unknown-8bit (ie: have surrogate-escaped bytes)
|
||||
def _encode(string, codec):
|
||||
string = str(string)
|
||||
if codec == UNKNOWN8BIT:
|
||||
return string.encode('ascii', 'surrogateescape')
|
||||
else:
|
||||
return string.encode(codec)
|
||||
|
||||
|
||||
class Charset(object):
|
||||
"""Map character sets to their email properties.
|
||||
|
||||
This class provides information about the requirements imposed on email
|
||||
for a specific character set. It also provides convenience routines for
|
||||
converting between character sets, given the availability of the
|
||||
applicable codecs. Given a character set, it will do its best to provide
|
||||
information on how to use that character set in an email in an
|
||||
RFC-compliant way.
|
||||
|
||||
Certain character sets must be encoded with quoted-printable or base64
|
||||
when used in email headers or bodies. Certain character sets must be
|
||||
converted outright, and are not allowed in email. Instances of this
|
||||
module expose the following information about a character set:
|
||||
|
||||
input_charset: The initial character set specified. Common aliases
|
||||
are converted to their `official' email names (e.g. latin_1
|
||||
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
|
||||
|
||||
header_encoding: If the character set must be encoded before it can be
|
||||
used in an email header, this attribute will be set to
|
||||
Charset.QP (for quoted-printable), Charset.BASE64 (for
|
||||
base64 encoding), or Charset.SHORTEST for the shortest of
|
||||
QP or BASE64 encoding. Otherwise, it will be None.
|
||||
|
||||
body_encoding: Same as header_encoding, but describes the encoding for the
|
||||
mail message's body, which indeed may be different than the
|
||||
header encoding. Charset.SHORTEST is not allowed for
|
||||
body_encoding.
|
||||
|
||||
output_charset: Some character sets must be converted before they can be
|
||||
used in email headers or bodies. If the input_charset is
|
||||
one of them, this attribute will contain the name of the
|
||||
charset output will be converted to. Otherwise, it will
|
||||
be None.
|
||||
|
||||
input_codec: The name of the Python codec used to convert the
|
||||
input_charset to Unicode. If no conversion codec is
|
||||
necessary, this attribute will be None.
|
||||
|
||||
output_codec: The name of the Python codec used to convert Unicode
|
||||
to the output_charset. If no conversion codec is necessary,
|
||||
this attribute will have the same value as the input_codec.
|
||||
"""
|
||||
def __init__(self, input_charset=DEFAULT_CHARSET):
|
||||
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
|
||||
# unicode because its .lower() is locale insensitive. If the argument
|
||||
# is already a unicode, we leave it at that, but ensure that the
|
||||
# charset is ASCII, as the standard (RFC XXX) requires.
|
||||
try:
|
||||
if isinstance(input_charset, str):
|
||||
input_charset.encode('ascii')
|
||||
else:
|
||||
input_charset = str(input_charset, 'ascii')
|
||||
except UnicodeError:
|
||||
raise errors.CharsetError(input_charset)
|
||||
input_charset = input_charset.lower()
|
||||
# Set the input charset after filtering through the aliases
|
||||
self.input_charset = ALIASES.get(input_charset, input_charset)
|
||||
# We can try to guess which encoding and conversion to use by the
|
||||
# charset_map dictionary. Try that first, but let the user override
|
||||
# it.
|
||||
henc, benc, conv = CHARSETS.get(self.input_charset,
|
||||
(SHORTEST, BASE64, None))
|
||||
if not conv:
|
||||
conv = self.input_charset
|
||||
# Set the attributes, allowing the arguments to override the default.
|
||||
self.header_encoding = henc
|
||||
self.body_encoding = benc
|
||||
self.output_charset = ALIASES.get(conv, conv)
|
||||
# Now set the codecs. If one isn't defined for input_charset,
|
||||
# guess and try a Unicode codec with the same name as input_codec.
|
||||
self.input_codec = CODEC_MAP.get(self.input_charset,
|
||||
self.input_charset)
|
||||
self.output_codec = CODEC_MAP.get(self.output_charset,
|
||||
self.output_charset)
|
||||
|
||||
def __str__(self):
|
||||
return self.input_charset.lower()
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def __eq__(self, other):
|
||||
return str(self) == str(other).lower()
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def get_body_encoding(self):
|
||||
"""Return the content-transfer-encoding used for body encoding.
|
||||
|
||||
This is either the string `quoted-printable' or `base64' depending on
|
||||
the encoding used, or it is a function in which case you should call
|
||||
the function with a single argument, the Message object being
|
||||
encoded. The function should then set the Content-Transfer-Encoding
|
||||
header itself to whatever is appropriate.
|
||||
|
||||
Returns "quoted-printable" if self.body_encoding is QP.
|
||||
Returns "base64" if self.body_encoding is BASE64.
|
||||
Returns conversion function otherwise.
|
||||
"""
|
||||
assert self.body_encoding != SHORTEST
|
||||
if self.body_encoding == QP:
|
||||
return 'quoted-printable'
|
||||
elif self.body_encoding == BASE64:
|
||||
return 'base64'
|
||||
else:
|
||||
return encode_7or8bit
|
||||
|
||||
def get_output_charset(self):
|
||||
"""Return the output character set.
|
||||
|
||||
This is self.output_charset if that is not None, otherwise it is
|
||||
self.input_charset.
|
||||
"""
|
||||
return self.output_charset or self.input_charset
|
||||
|
||||
def header_encode(self, string):
|
||||
"""Header-encode a string by converting it first to bytes.
|
||||
|
||||
The type of encoding (base64 or quoted-printable) will be based on
|
||||
this charset's `header_encoding`.
|
||||
|
||||
:param string: A unicode string for the header. It must be possible
|
||||
to encode this string to bytes using the character set's
|
||||
output codec.
|
||||
:return: The encoded string, with RFC 2047 chrome.
|
||||
"""
|
||||
codec = self.output_codec or 'us-ascii'
|
||||
header_bytes = _encode(string, codec)
|
||||
# 7bit/8bit encodings return the string unchanged (modulo conversions)
|
||||
encoder_module = self._get_encoder(header_bytes)
|
||||
if encoder_module is None:
|
||||
return string
|
||||
return encoder_module.header_encode(header_bytes, codec)
|
||||
|
||||
def header_encode_lines(self, string, maxlengths):
|
||||
"""Header-encode a string by converting it first to bytes.
|
||||
|
||||
This is similar to `header_encode()` except that the string is fit
|
||||
into maximum line lengths as given by the argument.
|
||||
|
||||
:param string: A unicode string for the header. It must be possible
|
||||
to encode this string to bytes using the character set's
|
||||
output codec.
|
||||
:param maxlengths: Maximum line length iterator. Each element
|
||||
returned from this iterator will provide the next maximum line
|
||||
length. This parameter is used as an argument to built-in next()
|
||||
and should never be exhausted. The maximum line lengths should
|
||||
not count the RFC 2047 chrome. These line lengths are only a
|
||||
hint; the splitter does the best it can.
|
||||
:return: Lines of encoded strings, each with RFC 2047 chrome.
|
||||
"""
|
||||
# See which encoding we should use.
|
||||
codec = self.output_codec or 'us-ascii'
|
||||
header_bytes = _encode(string, codec)
|
||||
encoder_module = self._get_encoder(header_bytes)
|
||||
encoder = partial(encoder_module.header_encode, charset=codec)
|
||||
# Calculate the number of characters that the RFC 2047 chrome will
|
||||
# contribute to each line.
|
||||
charset = self.get_output_charset()
|
||||
extra = len(charset) + RFC2047_CHROME_LEN
|
||||
# Now comes the hard part. We must encode bytes but we can't split on
|
||||
# bytes because some character sets are variable length and each
|
||||
# encoded word must stand on its own. So the problem is you have to
|
||||
# encode to bytes to figure out this word's length, but you must split
|
||||
# on characters. This causes two problems: first, we don't know how
|
||||
# many octets a specific substring of unicode characters will get
|
||||
# encoded to, and second, we don't know how many ASCII characters
|
||||
# those octets will get encoded to. Unless we try it. Which seems
|
||||
# inefficient. In the interest of being correct rather than fast (and
|
||||
# in the hope that there will be few encoded headers in any such
|
||||
# message), brute force it. :(
|
||||
lines = []
|
||||
current_line = []
|
||||
maxlen = next(maxlengths) - extra
|
||||
for character in string:
|
||||
current_line.append(character)
|
||||
this_line = EMPTYSTRING.join(current_line)
|
||||
length = encoder_module.header_length(_encode(this_line, charset))
|
||||
if length > maxlen:
|
||||
# This last character doesn't fit so pop it off.
|
||||
current_line.pop()
|
||||
# Does nothing fit on the first line?
|
||||
if not lines and not current_line:
|
||||
lines.append(None)
|
||||
else:
|
||||
separator = (' ' if lines else '')
|
||||
joined_line = EMPTYSTRING.join(current_line)
|
||||
header_bytes = _encode(joined_line, codec)
|
||||
lines.append(encoder(header_bytes))
|
||||
current_line = [character]
|
||||
maxlen = next(maxlengths) - extra
|
||||
joined_line = EMPTYSTRING.join(current_line)
|
||||
header_bytes = _encode(joined_line, codec)
|
||||
lines.append(encoder(header_bytes))
|
||||
return lines
|
||||
|
||||
def _get_encoder(self, header_bytes):
|
||||
if self.header_encoding == BASE64:
|
||||
return email.base64mime
|
||||
elif self.header_encoding == QP:
|
||||
return email.quoprimime
|
||||
elif self.header_encoding == SHORTEST:
|
||||
len64 = email.base64mime.header_length(header_bytes)
|
||||
lenqp = email.quoprimime.header_length(header_bytes)
|
||||
if len64 < lenqp:
|
||||
return email.base64mime
|
||||
else:
|
||||
return email.quoprimime
|
||||
else:
|
||||
return None
|
||||
|
||||
def body_encode(self, string):
|
||||
"""Body-encode a string by converting it first to bytes.
|
||||
|
||||
The type of encoding (base64 or quoted-printable) will be based on
|
||||
self.body_encoding. If body_encoding is None, we assume the
|
||||
output charset is a 7bit encoding, so re-encoding the decoded
|
||||
string using the ascii codec produces the correct string version
|
||||
of the content.
|
||||
"""
|
||||
if not string:
|
||||
return string
|
||||
if self.body_encoding is BASE64:
|
||||
if isinstance(string, str):
|
||||
string = string.encode(self.output_charset)
|
||||
return email.base64mime.body_encode(string)
|
||||
elif self.body_encoding is QP:
|
||||
# quopromime.body_encode takes a string, but operates on it as if
|
||||
# it were a list of byte codes. For a (minimal) history on why
|
||||
# this is so, see changeset 0cf700464177. To correctly encode a
|
||||
# character set, then, we must turn it into pseudo bytes via the
|
||||
# latin1 charset, which will encode any byte as a single code point
|
||||
# between 0 and 255, which is what body_encode is expecting.
|
||||
if isinstance(string, str):
|
||||
string = string.encode(self.output_charset)
|
||||
string = string.decode('latin1')
|
||||
return email.quoprimime.body_encode(string)
|
||||
else:
|
||||
if isinstance(string, str):
|
||||
string = string.encode(self.output_charset).decode('ascii')
|
||||
return string
|
||||
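Not part of the commit — a short sketch of the Charset class above in action, using the standard-library email.charset equivalent; the example strings are made up.

# Illustration only: alias resolution and per-charset encoding choices.
from email.charset import Charset, QP, BASE64

latin = Charset('latin-1')            # alias resolves to iso-8859-1
print(latin.input_charset)            # 'iso-8859-1'
print(latin.header_encoding == QP)    # True
print(latin.get_body_encoding())      # 'quoted-printable'

utf8 = Charset('utf-8')
print(utf8.body_encoding == BASE64)   # True
print(utf8.header_encode('h\u00e9llo'))  # '=?utf-8?b?aMOpbGxv?='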
90
pype/vendor/future/backports/email/encoders.py
vendored
Normal file
@ -0,0 +1,90 @@
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Encodings and related functions."""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import str
|
||||
|
||||
__all__ = [
|
||||
'encode_7or8bit',
|
||||
'encode_base64',
|
||||
'encode_noop',
|
||||
'encode_quopri',
|
||||
]
|
||||
|
||||
|
||||
try:
|
||||
from base64 import encodebytes as _bencode
|
||||
except ImportError:
|
||||
# Py2 compatibility. TODO: test this!
|
||||
from base64 import encodestring as _bencode
|
||||
from quopri import encodestring as _encodestring
|
||||
|
||||
|
||||
def _qencode(s):
|
||||
enc = _encodestring(s, quotetabs=True)
|
||||
# Must encode spaces, which quopri.encodestring() doesn't do
|
||||
return enc.replace(' ', '=20')
|
||||
|
||||
|
||||
def encode_base64(msg):
|
||||
"""Encode the message's payload in Base64.
|
||||
|
||||
Also, add an appropriate Content-Transfer-Encoding header.
|
||||
"""
|
||||
orig = msg.get_payload()
|
||||
encdata = str(_bencode(orig), 'ascii')
|
||||
msg.set_payload(encdata)
|
||||
msg['Content-Transfer-Encoding'] = 'base64'
|
||||
|
||||
|
||||
def encode_quopri(msg):
|
||||
"""Encode the message's payload in quoted-printable.
|
||||
|
||||
Also, add an appropriate Content-Transfer-Encoding header.
|
||||
"""
|
||||
orig = msg.get_payload()
|
||||
encdata = _qencode(orig)
|
||||
msg.set_payload(encdata)
|
||||
msg['Content-Transfer-Encoding'] = 'quoted-printable'
|
||||
|
||||
|
||||
def encode_7or8bit(msg):
|
||||
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
|
||||
orig = msg.get_payload()
|
||||
if orig is None:
|
||||
# There's no payload. For backwards compatibility we use 7bit
|
||||
msg['Content-Transfer-Encoding'] = '7bit'
|
||||
return
|
||||
# We play a trick to make this go fast. If encoding/decode to ASCII
|
||||
# succeeds, we know the data must be 7bit, otherwise treat it as 8bit.
|
||||
try:
|
||||
if isinstance(orig, str):
|
||||
orig.encode('ascii')
|
||||
else:
|
||||
orig.decode('ascii')
|
||||
except UnicodeError:
|
||||
charset = msg.get_charset()
|
||||
output_cset = charset and charset.output_charset
|
||||
# iso-2022-* is non-ASCII but encodes to a 7-bit representation
|
||||
if output_cset and output_cset.lower().startswith('iso-2022-'):
|
||||
msg['Content-Transfer-Encoding'] = '7bit'
|
||||
else:
|
||||
msg['Content-Transfer-Encoding'] = '8bit'
|
||||
else:
|
||||
msg['Content-Transfer-Encoding'] = '7bit'
|
||||
if not isinstance(orig, str):
|
||||
msg.set_payload(orig.decode('ascii', 'surrogateescape'))
|
||||
|
||||
|
||||
def encode_noop(msg):
|
||||
"""Do nothing."""
|
||||
# Well, not quite *nothing*: in Python3 we have to turn bytes into a string
|
||||
# in our internal surrogateescaped form in order to keep the model
|
||||
# consistent.
|
||||
orig = msg.get_payload()
|
||||
if not isinstance(orig, str):
|
||||
msg.set_payload(orig.decode('ascii', 'surrogateescape'))
|
||||
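Not part of the commit — a minimal sketch of applying one of the encoders above to a message, using the standard-library email.encoders and email.message modules; the payload is made up.

# Illustration only: encode_base64 replaces the payload and sets the header.
from email.message import Message
from email import encoders

msg = Message()
msg.set_payload(b'\x00\x01 raw bytes payload')
encoders.encode_base64(msg)
print(msg['Content-Transfer-Encoding'])   # 'base64'
print(msg.get_payload())                  # base64 text of the original bytes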
111
pype/vendor/future/backports/email/errors.py
vendored
Normal file
@ -0,0 +1,111 @@
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""email package exception classes."""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import super
|
||||
|
||||
|
||||
class MessageError(Exception):
|
||||
"""Base class for errors in the email package."""
|
||||
|
||||
|
||||
class MessageParseError(MessageError):
|
||||
"""Base class for message parsing errors."""
|
||||
|
||||
|
||||
class HeaderParseError(MessageParseError):
|
||||
"""Error while parsing headers."""
|
||||
|
||||
|
||||
class BoundaryError(MessageParseError):
|
||||
"""Couldn't find terminating boundary."""
|
||||
|
||||
|
||||
class MultipartConversionError(MessageError, TypeError):
|
||||
"""Conversion to a multipart is prohibited."""
|
||||
|
||||
|
||||
class CharsetError(MessageError):
|
||||
"""An illegal charset was given."""
|
||||
|
||||
|
||||
# These are parsing defects which the parser was able to work around.
|
||||
class MessageDefect(ValueError):
|
||||
"""Base class for a message defect."""
|
||||
|
||||
def __init__(self, line=None):
|
||||
if line is not None:
|
||||
super().__init__(line)
|
||||
self.line = line
|
||||
|
||||
class NoBoundaryInMultipartDefect(MessageDefect):
|
||||
"""A message claimed to be a multipart but had no boundary parameter."""
|
||||
|
||||
class StartBoundaryNotFoundDefect(MessageDefect):
|
||||
"""The claimed start boundary was never found."""
|
||||
|
||||
class CloseBoundaryNotFoundDefect(MessageDefect):
|
||||
"""A start boundary was found, but not the corresponding close boundary."""
|
||||
|
||||
class FirstHeaderLineIsContinuationDefect(MessageDefect):
|
||||
"""A message had a continuation line as its first header line."""
|
||||
|
||||
class MisplacedEnvelopeHeaderDefect(MessageDefect):
|
||||
"""A 'Unix-from' header was found in the middle of a header block."""
|
||||
|
||||
class MissingHeaderBodySeparatorDefect(MessageDefect):
|
||||
"""Found line with no leading whitespace and no colon before blank line."""
|
||||
# XXX: backward compatibility, just in case (it was never emitted).
|
||||
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
|
||||
|
||||
class MultipartInvariantViolationDefect(MessageDefect):
|
||||
"""A message claimed to be a multipart but no subparts were found."""
|
||||
|
||||
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
|
||||
"""An invalid content transfer encoding was set on the multipart itself."""
|
||||
|
||||
class UndecodableBytesDefect(MessageDefect):
|
||||
"""Header contained bytes that could not be decoded"""
|
||||
|
||||
class InvalidBase64PaddingDefect(MessageDefect):
|
||||
"""base64 encoded sequence had an incorrect length"""
|
||||
|
||||
class InvalidBase64CharactersDefect(MessageDefect):
|
||||
"""base64 encoded sequence had characters not in base64 alphabet"""
|
||||
|
||||
# These errors are specific to header parsing.
|
||||
|
||||
class HeaderDefect(MessageDefect):
|
||||
"""Base class for a header defect."""
|
||||
|
||||
def __init__(self, *args, **kw):
|
||||
super().__init__(*args, **kw)
|
||||
|
||||
class InvalidHeaderDefect(HeaderDefect):
|
||||
"""Header is not valid, message gives details."""
|
||||
|
||||
class HeaderMissingRequiredValue(HeaderDefect):
|
||||
"""A header that must have a value had none"""
|
||||
|
||||
class NonPrintableDefect(HeaderDefect):
|
||||
"""ASCII characters outside the ascii-printable range found"""
|
||||
|
||||
def __init__(self, non_printables):
|
||||
super().__init__(non_printables)
|
||||
self.non_printables = non_printables
|
||||
|
||||
def __str__(self):
|
||||
return ("the following ASCII non-printables found in header: "
|
||||
"{}".format(self.non_printables))
|
||||
|
||||
class ObsoleteHeaderDefect(HeaderDefect):
|
||||
"""Header uses syntax declared obsolete by RFC 5322"""
|
||||
|
||||
class NonASCIILocalPartDefect(HeaderDefect):
|
||||
"""local_part contains non-ASCII characters"""
|
||||
# This defect only occurs during unicode parsing, not when
|
||||
# parsing messages decoded from binary.
|
||||
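Not part of the commit — a short sketch showing how the defect classes above surface in practice: under the default compat32 policy the feed parser (next file) records them on the message instead of raising. Standard-library calls are used; the raw message is made up.

# Illustration only: a multipart with no boundary produces defects.
from email import message_from_string, policy

raw = 'Content-Type: multipart/mixed\n\nbody without boundary\n'
msg = message_from_string(raw)
print([d.__class__.__name__ for d in msg.defects])
# ['NoBoundaryInMultipartDefect', 'MultipartInvariantViolationDefect']

# With raise_on_defect=True the same input raises the defect instead:
# message_from_string(raw, policy=policy.compat32.clone(raise_on_defect=True))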
525
pype/vendor/future/backports/email/feedparser.py
vendored
Normal file
@ -0,0 +1,525 @@
# Copyright (C) 2004-2006 Python Software Foundation
|
||||
# Authors: Baxter, Wouters and Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""FeedParser - An email feed parser.
|
||||
|
||||
The feed parser implements an interface for incrementally parsing an email
|
||||
message, line by line. This has advantages for certain applications, such as
|
||||
those reading email messages off a socket.
|
||||
|
||||
FeedParser.feed() is the primary interface for pushing new data into the
|
||||
parser. It returns when there's nothing more it can do with the available
|
||||
data. When you have no more data to push into the parser, call .close().
|
||||
This completes the parsing and returns the root message object.
|
||||
|
||||
The other advantage of this parser is that it will never raise a parsing
|
||||
exception. Instead, when it finds something unexpected, it adds a 'defect' to
|
||||
the current message. Defects are just instances that live on the message
|
||||
object's .defects attribute.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import object, range, super
|
||||
from future.utils import implements_iterator, PY3
|
||||
|
||||
__all__ = ['FeedParser', 'BytesFeedParser']
|
||||
|
||||
import re
|
||||
|
||||
from future.backports.email import errors
|
||||
from future.backports.email import message
|
||||
from future.backports.email._policybase import compat32
|
||||
|
||||
NLCRE = re.compile('\r\n|\r|\n')
|
||||
NLCRE_bol = re.compile('(\r\n|\r|\n)')
|
||||
NLCRE_eol = re.compile('(\r\n|\r|\n)\Z')
|
||||
NLCRE_crack = re.compile('(\r\n|\r|\n)')
|
||||
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
|
||||
# except controls, SP, and ":".
|
||||
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
|
||||
EMPTYSTRING = ''
|
||||
NL = '\n'
|
||||
|
||||
NeedMoreData = object()
|
||||
|
||||
|
||||
# @implements_iterator
|
||||
class BufferedSubFile(object):
|
||||
"""A file-ish object that can have new data loaded into it.
|
||||
|
||||
You can also push and pop line-matching predicates onto a stack. When the
|
||||
current predicate matches the current line, a false EOF response
|
||||
(i.e. empty string) is returned instead. This lets the parser adhere to a
|
||||
simple abstraction -- it parses until EOF closes the current message.
|
||||
"""
|
||||
def __init__(self):
|
||||
# The last partial line pushed into this object.
|
||||
self._partial = ''
|
||||
# The list of full, pushed lines, in reverse order
|
||||
self._lines = []
|
||||
# The stack of false-EOF checking predicates.
|
||||
self._eofstack = []
|
||||
# A flag indicating whether the file has been closed or not.
|
||||
self._closed = False
|
||||
|
||||
def push_eof_matcher(self, pred):
|
||||
self._eofstack.append(pred)
|
||||
|
||||
def pop_eof_matcher(self):
|
||||
return self._eofstack.pop()
|
||||
|
||||
def close(self):
|
||||
# Don't forget any trailing partial line.
|
||||
self._lines.append(self._partial)
|
||||
self._partial = ''
|
||||
self._closed = True
|
||||
|
||||
def readline(self):
|
||||
if not self._lines:
|
||||
if self._closed:
|
||||
return ''
|
||||
return NeedMoreData
|
||||
# Pop the line off the stack and see if it matches the current
|
||||
# false-EOF predicate.
|
||||
line = self._lines.pop()
|
||||
# RFC 2046, section 5.1.2 requires us to recognize outer level
|
||||
# boundaries at any level of inner nesting. Do this, but be sure it's
|
||||
# in the order of most to least nested.
|
||||
for ateof in self._eofstack[::-1]:
|
||||
if ateof(line):
|
||||
# We're at the false EOF. But push the last line back first.
|
||||
self._lines.append(line)
|
||||
return ''
|
||||
return line
|
||||
|
||||
def unreadline(self, line):
|
||||
# Let the consumer push a line back into the buffer.
|
||||
assert line is not NeedMoreData
|
||||
self._lines.append(line)
|
||||
|
||||
def push(self, data):
|
||||
"""Push some new data into this object."""
|
||||
# Handle any previous leftovers
|
||||
data, self._partial = self._partial + data, ''
|
||||
# Crack into lines, but preserve the newlines on the end of each
|
||||
parts = NLCRE_crack.split(data)
|
||||
# The *ahem* interesting behaviour of re.split when supplied grouping
|
||||
# parentheses is that the last element of the resulting list is the
|
||||
# data after the final RE. In the case of a NL/CR terminated string,
|
||||
# this is the empty string.
|
||||
self._partial = parts.pop()
|
||||
#GAN 29Mar09 bugs 1555570, 1721862 Confusion at 8K boundary ending with \r:
|
||||
# is there a \n to follow later?
|
||||
if not self._partial and parts and parts[-1].endswith('\r'):
|
||||
self._partial = parts.pop(-2)+parts.pop()
|
||||
# parts is a list of strings, alternating between the line contents
|
||||
# and the eol character(s). Gather up a list of lines after
|
||||
# re-attaching the newlines.
|
||||
lines = []
|
||||
for i in range(len(parts) // 2):
|
||||
lines.append(parts[i*2] + parts[i*2+1])
|
||||
self.pushlines(lines)
|
||||
|
||||
def pushlines(self, lines):
|
||||
# Reverse and insert at the front of the lines.
|
||||
self._lines[:0] = lines[::-1]
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
line = self.readline()
|
||||
if line == '':
|
||||
raise StopIteration
|
||||
return line
|
||||
|
||||
|
||||
class FeedParser(object):
|
||||
"""A feed-style parser of email."""
|
||||
|
||||
def __init__(self, _factory=message.Message, **_3to2kwargs):
|
||||
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
|
||||
else: policy = compat32
|
||||
"""_factory is called with no arguments to create a new message obj
|
||||
|
||||
The policy keyword specifies a policy object that controls a number of
|
||||
aspects of the parser's operation. The default policy maintains
|
||||
backward compatibility.
|
||||
|
||||
"""
|
||||
self._factory = _factory
|
||||
self.policy = policy
|
||||
try:
|
||||
_factory(policy=self.policy)
|
||||
self._factory_kwds = lambda: {'policy': self.policy}
|
||||
except TypeError:
|
||||
# Assume this is an old-style factory
|
||||
self._factory_kwds = lambda: {}
|
||||
self._input = BufferedSubFile()
|
||||
self._msgstack = []
|
||||
if PY3:
|
||||
self._parse = self._parsegen().__next__
|
||||
else:
|
||||
self._parse = self._parsegen().next
|
||||
self._cur = None
|
||||
self._last = None
|
||||
self._headersonly = False
|
||||
|
||||
# Non-public interface for supporting Parser's headersonly flag
|
||||
def _set_headersonly(self):
|
||||
self._headersonly = True
|
||||
|
||||
def feed(self, data):
|
||||
"""Push more data into the parser."""
|
||||
self._input.push(data)
|
||||
self._call_parse()
|
||||
|
||||
def _call_parse(self):
|
||||
try:
|
||||
self._parse()
|
||||
except StopIteration:
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
"""Parse all remaining data and return the root message object."""
|
||||
self._input.close()
|
||||
self._call_parse()
|
||||
root = self._pop_message()
|
||||
assert not self._msgstack
|
||||
# Look for final set of defects
|
||||
if root.get_content_maintype() == 'multipart' \
|
||||
and not root.is_multipart():
|
||||
defect = errors.MultipartInvariantViolationDefect()
|
||||
self.policy.handle_defect(root, defect)
|
||||
return root
|
||||
|
||||
def _new_message(self):
|
||||
msg = self._factory(**self._factory_kwds())
|
||||
if self._cur and self._cur.get_content_type() == 'multipart/digest':
|
||||
msg.set_default_type('message/rfc822')
|
||||
if self._msgstack:
|
||||
self._msgstack[-1].attach(msg)
|
||||
self._msgstack.append(msg)
|
||||
self._cur = msg
|
||||
self._last = msg
|
||||
|
||||
def _pop_message(self):
|
||||
retval = self._msgstack.pop()
|
||||
if self._msgstack:
|
||||
self._cur = self._msgstack[-1]
|
||||
else:
|
||||
self._cur = None
|
||||
return retval
|
||||
|
||||
def _parsegen(self):
|
||||
# Create a new message and start by parsing headers.
|
||||
self._new_message()
|
||||
headers = []
|
||||
# Collect the headers, searching for a line that doesn't match the RFC
|
||||
# 2822 header or continuation pattern (including an empty line).
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if not headerRE.match(line):
|
||||
# If we saw the RFC defined header/body separator
|
||||
# (i.e. newline), just throw it away. Otherwise the line is
|
||||
# part of the body so push it back.
|
||||
if not NLCRE.match(line):
|
||||
defect = errors.MissingHeaderBodySeparatorDefect()
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
self._input.unreadline(line)
|
||||
break
|
||||
headers.append(line)
|
||||
# Done with the headers, so parse them and figure out what we're
|
||||
# supposed to see in the body of the message.
|
||||
self._parse_headers(headers)
|
||||
# Headers-only parsing is a backwards compatibility hack, which was
|
||||
# necessary in the older parser, which could raise errors. All
|
||||
# remaining lines in the input are thrown into the message body.
|
||||
if self._headersonly:
|
||||
lines = []
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if line == '':
|
||||
break
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
return
|
||||
if self._cur.get_content_type() == 'message/delivery-status':
|
||||
# message/delivery-status contains blocks of headers separated by
|
||||
# a blank line. We'll represent each header block as a separate
|
||||
# nested message object, but the processing is a bit different
|
||||
# than standard message/* types because there is no body for the
|
||||
# nested messages. A blank line separates the subparts.
|
||||
while True:
|
||||
self._input.push_eof_matcher(NLCRE.match)
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
msg = self._pop_message()
|
||||
# We need to pop the EOF matcher in order to tell if we're at
|
||||
# the end of the current file, not the end of the last block
|
||||
# of message headers.
|
||||
self._input.pop_eof_matcher()
|
||||
# The input stream must be sitting at the newline or at the
|
||||
# EOF. We want to see if we're at the end of this subpart, so
|
||||
# first consume the blank line, then test the next line to see
|
||||
# if we're at this subpart's EOF.
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
if line == '':
|
||||
break
|
||||
# Not at EOF so this is a line we're going to need.
|
||||
self._input.unreadline(line)
|
||||
return
|
||||
if self._cur.get_content_maintype() == 'message':
|
||||
# The message claims to be a message/* type, so what follows is
|
||||
# another RFC 2822 message.
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
self._pop_message()
|
||||
return
|
||||
if self._cur.get_content_maintype() == 'multipart':
|
||||
boundary = self._cur.get_boundary()
|
||||
if boundary is None:
|
||||
# The message /claims/ to be a multipart but it has not
|
||||
# defined a boundary. That's a problem which we'll handle by
|
||||
# reading everything until the EOF and marking the message as
|
||||
# defective.
|
||||
defect = errors.NoBoundaryInMultipartDefect()
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
lines = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
return
|
||||
# Make sure a valid content type was specified per RFC 2045:6.4.
|
||||
if (self._cur.get('content-transfer-encoding', '8bit').lower()
|
||||
not in ('7bit', '8bit', 'binary')):
|
||||
defect = errors.InvalidMultipartContentTransferEncodingDefect()
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
# Create a line match predicate which matches the inter-part
|
||||
# boundary as well as the end-of-multipart boundary. Don't push
|
||||
# this onto the input stream until we've scanned past the
|
||||
# preamble.
|
||||
separator = '--' + boundary
|
||||
boundaryre = re.compile(
|
||||
'(?P<sep>' + re.escape(separator) +
|
||||
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
|
||||
capturing_preamble = True
|
||||
preamble = []
|
||||
linesep = False
|
||||
close_boundary_seen = False
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if line == '':
|
||||
break
|
||||
mo = boundaryre.match(line)
|
||||
if mo:
|
||||
# If we're looking at the end boundary, we're done with
|
||||
# this multipart. If there was a newline at the end of
|
||||
# the closing boundary, then we need to initialize the
|
||||
# epilogue with the empty string (see below).
|
||||
if mo.group('end'):
|
||||
close_boundary_seen = True
|
||||
linesep = mo.group('linesep')
|
||||
break
|
||||
# We saw an inter-part boundary. Were we in the preamble?
|
||||
if capturing_preamble:
|
||||
if preamble:
|
||||
# According to RFC 2046, the last newline belongs
|
||||
# to the boundary.
|
||||
lastline = preamble[-1]
|
||||
eolmo = NLCRE_eol.search(lastline)
|
||||
if eolmo:
|
||||
preamble[-1] = lastline[:-len(eolmo.group(0))]
|
||||
self._cur.preamble = EMPTYSTRING.join(preamble)
|
||||
capturing_preamble = False
|
||||
self._input.unreadline(line)
|
||||
continue
|
||||
# We saw a boundary separating two parts. Consume any
|
||||
# multiple boundary lines that may be following. Our
|
||||
# interpretation of RFC 2046 BNF grammar does not produce
|
||||
# body parts within such double boundaries.
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
mo = boundaryre.match(line)
|
||||
if not mo:
|
||||
self._input.unreadline(line)
|
||||
break
|
||||
# Recurse to parse this subpart; the input stream points
|
||||
# at the subpart's first line.
|
||||
self._input.push_eof_matcher(boundaryre.match)
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
# Because of RFC 2046, the newline preceding the boundary
|
||||
# separator actually belongs to the boundary, not the
|
||||
# previous subpart's payload (or epilogue if the previous
|
||||
# part is a multipart).
|
||||
if self._last.get_content_maintype() == 'multipart':
|
||||
epilogue = self._last.epilogue
|
||||
if epilogue == '':
|
||||
self._last.epilogue = None
|
||||
elif epilogue is not None:
|
||||
mo = NLCRE_eol.search(epilogue)
|
||||
if mo:
|
||||
end = len(mo.group(0))
|
||||
self._last.epilogue = epilogue[:-end]
|
||||
else:
|
||||
payload = self._last._payload
|
||||
if isinstance(payload, str):
|
||||
mo = NLCRE_eol.search(payload)
|
||||
if mo:
|
||||
payload = payload[:-len(mo.group(0))]
|
||||
self._last._payload = payload
|
||||
self._input.pop_eof_matcher()
|
||||
self._pop_message()
|
||||
# Set the multipart up for newline cleansing, which will
|
||||
# happen if we're in a nested multipart.
|
||||
self._last = self._cur
|
||||
else:
|
||||
# I think we must be in the preamble
|
||||
assert capturing_preamble
|
||||
preamble.append(line)
|
||||
# We've seen either the EOF or the end boundary. If we're still
|
||||
# capturing the preamble, we never saw the start boundary. Note
|
||||
# that as a defect and store the captured text as the payload.
|
||||
if capturing_preamble:
|
||||
defect = errors.StartBoundaryNotFoundDefect()
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
self._cur.set_payload(EMPTYSTRING.join(preamble))
|
||||
epilogue = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
self._cur.epilogue = EMPTYSTRING.join(epilogue)
|
||||
return
|
||||
# If we're not processing the preamble, then we might have seen
|
||||
# EOF without seeing that end boundary...that is also a defect.
|
||||
if not close_boundary_seen:
|
||||
defect = errors.CloseBoundaryNotFoundDefect()
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
return
|
||||
# Everything from here to the EOF is epilogue. If the end boundary
|
||||
# ended in a newline, we'll need to make sure the epilogue isn't
|
||||
# None
|
||||
if linesep:
|
||||
epilogue = ['']
|
||||
else:
|
||||
epilogue = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
epilogue.append(line)
|
||||
# Any CRLF at the front of the epilogue is not technically part of
|
||||
# the epilogue. Also, watch out for an empty string epilogue,
|
||||
# which means a single newline.
|
||||
if epilogue:
|
||||
firstline = epilogue[0]
|
||||
bolmo = NLCRE_bol.match(firstline)
|
||||
if bolmo:
|
||||
epilogue[0] = firstline[len(bolmo.group(0)):]
|
||||
self._cur.epilogue = EMPTYSTRING.join(epilogue)
|
||||
return
|
||||
# Otherwise, it's some non-multipart type, so the entire rest of the
|
||||
# file contents becomes the payload.
|
||||
lines = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
|
||||
def _parse_headers(self, lines):
|
||||
# Passed a list of lines that make up the headers for the current msg
|
||||
lastheader = ''
|
||||
lastvalue = []
|
||||
for lineno, line in enumerate(lines):
|
||||
# Check for continuation
|
||||
if line[0] in ' \t':
|
||||
if not lastheader:
|
||||
# The first line of the headers was a continuation. This
|
||||
# is illegal, so let's note the defect, store the illegal
|
||||
# line, and ignore it for purposes of headers.
|
||||
defect = errors.FirstHeaderLineIsContinuationDefect(line)
|
||||
self.policy.handle_defect(self._cur, defect)
|
||||
continue
|
||||
lastvalue.append(line)
|
||||
continue
|
||||
if lastheader:
|
||||
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
|
||||
lastheader, lastvalue = '', []
|
||||
# Check for envelope header, i.e. unix-from
|
||||
if line.startswith('From '):
|
||||
if lineno == 0:
|
||||
# Strip off the trailing newline
|
||||
mo = NLCRE_eol.search(line)
|
||||
if mo:
|
||||
line = line[:-len(mo.group(0))]
|
||||
self._cur.set_unixfrom(line)
|
||||
continue
|
||||
elif lineno == len(lines) - 1:
|
||||
# Something looking like a unix-from at the end - it's
|
||||
# probably the first line of the body, so push back the
|
||||
# line and stop.
|
||||
self._input.unreadline(line)
|
||||
return
|
||||
else:
|
||||
# Weirdly placed unix-from line. Note this as a defect
|
||||
# and ignore it.
|
||||
defect = errors.MisplacedEnvelopeHeaderDefect(line)
|
||||
self._cur.defects.append(defect)
|
||||
continue
|
||||
# Split the line on the colon separating field name from value.
|
||||
# There will always be a colon, because if there wasn't the part of
|
||||
# the parser that calls us would have started parsing the body.
|
||||
i = line.find(':')
|
||||
assert i>0, "_parse_headers fed line with no : and no leading WS"
|
||||
lastheader = line[:i]
|
||||
lastvalue = [line]
|
||||
# Done with all the lines, so handle the last header.
|
||||
if lastheader:
|
||||
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
|
||||
|
||||
|
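# Illustrative sketch (not part of the vendored source; it assumes the
# stdlib email API, which this backport mirrors).  With the default
# compat32 policy the structural problems detected above are collected
# as defect objects instead of being raised:
#
#     from email.parser import FeedParser
#     from email import errors
#
#     p = FeedParser()
#     p.feed('Content-Type: multipart/mixed; boundary="SEP"\r\n\r\n')
#     p.feed('plain text with no boundary lines at all\r\n')
#     msg = p.close()
#     assert any(isinstance(d, errors.StartBoundaryNotFoundDefect)
#                for d in msg.defects)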
||||
class BytesFeedParser(FeedParser):
|
||||
"""Like FeedParser, but feed accepts bytes."""
|
||||
|
||||
def feed(self, data):
|
||||
super().feed(data.decode('ascii', 'surrogateescape'))
|
||||
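# Illustrative sketch (not part of the vendored source; it assumes the
# stdlib email.parser API, which this backport mirrors).  BytesFeedParser
# decodes incoming bytes with the surrogateescape handler, so non-ASCII
# header bytes survive the round trip into the message object:
#
#     from email.parser import BytesFeedParser
#
#     p = BytesFeedParser()
#     p.feed(b'Subject: caf\xc3\xa9\r\n\r\n')
#     p.feed(b'body\r\n')
#     msg = p.close()
#     raw = msg['subject'].encode('ascii', 'surrogateescape')
#     assert raw == b'caf\xc3\xa9'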
498
pype/vendor/future/backports/email/generator.py
vendored
Normal file

@@ -0,0 +1,498 @@
|
|||
# Copyright (C) 2001-2010 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Classes to generate plain text from a message object tree."""
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
from __future__ import absolute_import
|
||||
from future.builtins import super
|
||||
from future.builtins import str
|
||||
|
||||
__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import random
|
||||
import warnings
|
||||
|
||||
from io import StringIO, BytesIO
|
||||
from future.backports.email._policybase import compat32
|
||||
from future.backports.email.header import Header
|
||||
from future.backports.email.utils import _has_surrogates
|
||||
import future.backports.email.charset as _charset
|
||||
|
||||
UNDERSCORE = '_'
|
||||
NL = '\n' # XXX: no longer used by the code below.
|
||||
|
||||
fcre = re.compile(r'^From ', re.MULTILINE)
|
||||
|
||||
|
||||
class Generator(object):
|
||||
"""Generates output from a Message object tree.
|
||||
|
||||
This basic generator writes the message to the given file object as plain
|
||||
text.
|
||||
"""
|
||||
#
|
||||
# Public interface
|
||||
#
|
||||
|
||||
def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, **_3to2kwargs):
|
||||
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
|
||||
else: policy = None
|
||||
"""Create the generator for message flattening.
|
||||
|
||||
outfp is the output file-like object for writing the message to. It
|
||||
must have a write() method.
|
||||
|
||||
Optional mangle_from_ is a flag that, when True (the default), escapes
|
||||
From_ lines in the body of the message by putting a `>' in front of
|
||||
them.
|
||||
|
||||
Optional maxheaderlen specifies the longest length for a non-continued
|
||||
header. When a header line is longer (in characters, with tabs
|
||||
expanded to 8 spaces) than maxheaderlen, the header will split as
|
||||
defined in the Header class. Set maxheaderlen to zero to disable
|
||||
header wrapping. The default is 78, as recommended (but not required)
|
||||
by RFC 2822.
|
||||
|
||||
The policy keyword specifies a policy object that controls a number of
|
||||
aspects of the generator's operation. The default policy maintains
|
||||
backward compatibility.
|
||||
|
||||
"""
|
||||
self._fp = outfp
|
||||
self._mangle_from_ = mangle_from_
|
||||
self.maxheaderlen = maxheaderlen
|
||||
self.policy = policy
|
||||
|
||||
def write(self, s):
|
||||
# Just delegate to the file object
|
||||
self._fp.write(s)
|
||||
|
||||
def flatten(self, msg, unixfrom=False, linesep=None):
|
||||
r"""Print the message object tree rooted at msg to the output file
|
||||
specified when the Generator instance was created.
|
||||
|
||||
unixfrom is a flag that forces the printing of a Unix From_ delimiter
|
||||
before the first object in the message tree. If the original message
|
||||
has no From_ delimiter, a `standard' one is crafted. By default, this
|
||||
is False to inhibit the printing of any From_ delimiter.
|
||||
|
||||
Note that for subobjects, no From_ line is printed.
|
||||
|
||||
linesep specifies the characters used to indicate a new line in
|
||||
the output. The default value is determined by the policy.
|
||||
|
||||
"""
|
||||
# We use the _XXX constants for operating on data that comes directly
|
||||
# from the msg, and _encoded_XXX constants for operating on data that
|
||||
# has already been converted (to bytes in the BytesGenerator) and
|
||||
# inserted into a temporary buffer.
|
||||
policy = msg.policy if self.policy is None else self.policy
|
||||
if linesep is not None:
|
||||
policy = policy.clone(linesep=linesep)
|
||||
if self.maxheaderlen is not None:
|
||||
policy = policy.clone(max_line_length=self.maxheaderlen)
|
||||
self._NL = policy.linesep
|
||||
self._encoded_NL = self._encode(self._NL)
|
||||
self._EMPTY = ''
|
||||
self._encoded_EMPTY = self._encode('')
|
||||
# Because we use clone (below) when we recursively process message
|
||||
# subparts, and because clone uses the computed policy (not None),
|
||||
# submessages will automatically get set to the computed policy when
|
||||
# they are processed by this code.
|
||||
old_gen_policy = self.policy
|
||||
old_msg_policy = msg.policy
|
||||
try:
|
||||
self.policy = policy
|
||||
msg.policy = policy
|
||||
if unixfrom:
|
||||
ufrom = msg.get_unixfrom()
|
||||
if not ufrom:
|
||||
ufrom = 'From nobody ' + time.ctime(time.time())
|
||||
self.write(ufrom + self._NL)
|
||||
self._write(msg)
|
||||
finally:
|
||||
self.policy = old_gen_policy
|
||||
msg.policy = old_msg_policy
|
||||
|
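# Illustrative usage sketch (not part of the vendored source; it assumes
# the stdlib email.generator API, which this backport mirrors).  flatten()
# serialises a message tree to the file object supplied at construction
# time; a maxheaderlen of 0 disables header wrapping entirely:
#
#     from io import StringIO
#     from email.message import Message
#     from email.generator import Generator
#
#     msg = Message()
#     msg['Subject'] = 'hello'
#     msg.set_payload('body\n')
#
#     out = StringIO()
#     Generator(out, mangle_from_=False, maxheaderlen=0).flatten(msg)
#     # headers, blank separator line, then the payload
#     assert out.getvalue() == 'Subject: hello\n\nbody\n'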
||||
def clone(self, fp):
|
||||
"""Clone this generator with the exact same options."""
|
||||
return self.__class__(fp,
|
||||
self._mangle_from_,
|
||||
None, # Use policy setting, which we've adjusted
|
||||
policy=self.policy)
|
||||
|
||||
#
|
||||
# Protected interface - undocumented ;/
|
||||
#
|
||||
|
||||
# Note that we use 'self.write' when what we are writing is coming from
|
||||
# the source, and self._fp.write when what we are writing is coming from a
|
||||
# buffer (because the Bytes subclass has already had a chance to transform
|
||||
# the data in its write method in that case). This is an entirely
|
||||
# pragmatic split determined by experiment; we could be more general by
|
||||
# always using write and having the Bytes subclass write method detect when
|
||||
# it has already transformed the input; but, since this whole thing is a
|
||||
# hack anyway this seems good enough.
|
||||
|
||||
# Similarly, we have _XXX and _encoded_XXX attributes that are used on
|
||||
# source and buffer data, respectively.
|
||||
_encoded_EMPTY = ''
|
||||
|
||||
def _new_buffer(self):
|
||||
# BytesGenerator overrides this to return BytesIO.
|
||||
return StringIO()
|
||||
|
||||
def _encode(self, s):
|
||||
# BytesGenerator overrides this to encode strings to bytes.
|
||||
return s
|
||||
|
||||
def _write_lines(self, lines):
|
||||
# We have to transform the line endings.
|
||||
if not lines:
|
||||
return
|
||||
lines = lines.splitlines(True)
|
||||
for line in lines[:-1]:
|
||||
self.write(line.rstrip('\r\n'))
|
||||
self.write(self._NL)
|
||||
laststripped = lines[-1].rstrip('\r\n')
|
||||
self.write(laststripped)
|
||||
if len(lines[-1]) != len(laststripped):
|
||||
self.write(self._NL)
|
||||
|
||||
def _write(self, msg):
|
||||
# We can't write the headers yet because of the following scenario:
|
||||
# say a multipart message includes the boundary string somewhere in
|
||||
# its body. We'd have to calculate the new boundary /before/ we write
|
||||
# the headers so that we can write the correct Content-Type:
|
||||
# parameter.
|
||||
#
|
||||
# The way we do this, so as to make the _handle_*() methods simpler,
|
||||
# is to cache any subpart writes into a buffer. Then we write the
|
||||
# headers and the buffer contents. That way, subpart handlers can
|
||||
# Do The Right Thing, and can still modify the Content-Type: header if
|
||||
# necessary.
|
||||
oldfp = self._fp
|
||||
try:
|
||||
self._fp = sfp = self._new_buffer()
|
||||
self._dispatch(msg)
|
||||
finally:
|
||||
self._fp = oldfp
|
||||
# Write the headers. First we see if the message object wants to
|
||||
# handle that itself. If not, we'll do it generically.
|
||||
meth = getattr(msg, '_write_headers', None)
|
||||
if meth is None:
|
||||
self._write_headers(msg)
|
||||
else:
|
||||
meth(self)
|
||||
self._fp.write(sfp.getvalue())
|
||||
|
||||
def _dispatch(self, msg):
|
||||
# Get the Content-Type: for the message, then try to dispatch to
|
||||
# self._handle_<maintype>_<subtype>(). If there's no handler for the
|
||||
# full MIME type, then dispatch to self._handle_<maintype>(). If
|
||||
# that's missing too, then dispatch to self._writeBody().
|
||||
main = msg.get_content_maintype()
|
||||
sub = msg.get_content_subtype()
|
||||
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
|
||||
meth = getattr(self, '_handle_' + specific, None)
|
||||
if meth is None:
|
||||
generic = main.replace('-', '_')
|
||||
meth = getattr(self, '_handle_' + generic, None)
|
||||
if meth is None:
|
||||
meth = self._writeBody
|
||||
meth(msg)
|
||||
|
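# Illustrative sketch (not part of the vendored source; it assumes the
# stdlib email.generator API, which this backport mirrors).  Because of
# the dispatch above, a subclass can take over serialisation of one MIME
# type simply by naming a method _handle_<maintype>_<subtype>; the
# CalendarAwareGenerator name below is made up for the example:
#
#     from io import StringIO
#     from email.generator import Generator
#     from email.mime.text import MIMEText
#
#     class CalendarAwareGenerator(Generator):
#         def _handle_text_calendar(self, msg):     # handles text/calendar
#             self.write('[calendar omitted]')
#
#     out = StringIO()
#     part = MIMEText('BEGIN:VCALENDAR', _subtype='calendar')
#     CalendarAwareGenerator(out, mangle_from_=False).flatten(part)
#     assert out.getvalue().endswith('[calendar omitted]')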
||||
#
|
||||
# Default handlers
|
||||
#
|
||||
|
||||
def _write_headers(self, msg):
|
||||
for h, v in msg.raw_items():
|
||||
self.write(self.policy.fold(h, v))
|
||||
# A blank line always separates headers from body
|
||||
self.write(self._NL)
|
||||
|
||||
#
|
||||
# Handlers for writing types and subtypes
|
||||
#
|
||||
|
||||
def _handle_text(self, msg):
|
||||
payload = msg.get_payload()
|
||||
if payload is None:
|
||||
return
|
||||
if not isinstance(payload, str):
|
||||
raise TypeError('string payload expected: %s' % type(payload))
|
||||
if _has_surrogates(msg._payload):
|
||||
charset = msg.get_param('charset')
|
||||
if charset is not None:
|
||||
del msg['content-transfer-encoding']
|
||||
msg.set_payload(payload, charset)
|
||||
payload = msg.get_payload()
|
||||
if self._mangle_from_:
|
||||
payload = fcre.sub('>From ', payload)
|
||||
self._write_lines(payload)
|
||||
|
||||
# Default body handler
|
||||
_writeBody = _handle_text
|
||||
|
||||
def _handle_multipart(self, msg):
|
||||
# The trick here is to write out each part separately, merge them all
|
||||
# together, and then make sure that the boundary we've chosen isn't
|
||||
# present in the payload.
|
||||
msgtexts = []
|
||||
subparts = msg.get_payload()
|
||||
if subparts is None:
|
||||
subparts = []
|
||||
elif isinstance(subparts, str):
|
||||
# e.g. a non-strict parse of a message with no starting boundary.
|
||||
self.write(subparts)
|
||||
return
|
||||
elif not isinstance(subparts, list):
|
||||
# Scalar payload
|
||||
subparts = [subparts]
|
||||
for part in subparts:
|
||||
s = self._new_buffer()
|
||||
g = self.clone(s)
|
||||
g.flatten(part, unixfrom=False, linesep=self._NL)
|
||||
msgtexts.append(s.getvalue())
|
||||
# BAW: What about boundaries that are wrapped in double-quotes?
|
||||
boundary = msg.get_boundary()
|
||||
if not boundary:
|
||||
# Create a boundary that doesn't appear in any of the
|
||||
# message texts.
|
||||
alltext = self._encoded_NL.join(msgtexts)
|
||||
boundary = self._make_boundary(alltext)
|
||||
msg.set_boundary(boundary)
|
||||
# If there's a preamble, write it out, with a trailing CRLF
|
||||
if msg.preamble is not None:
|
||||
if self._mangle_from_:
|
||||
preamble = fcre.sub('>From ', msg.preamble)
|
||||
else:
|
||||
preamble = msg.preamble
|
||||
self._write_lines(preamble)
|
||||
self.write(self._NL)
|
||||
# dash-boundary transport-padding CRLF
|
||||
self.write('--' + boundary + self._NL)
|
||||
# body-part
|
||||
if msgtexts:
|
||||
self._fp.write(msgtexts.pop(0))
|
||||
# *encapsulation
|
||||
# --> delimiter transport-padding
|
||||
# --> CRLF body-part
|
||||
for body_part in msgtexts:
|
||||
# delimiter transport-padding CRLF
|
||||
self.write(self._NL + '--' + boundary + self._NL)
|
||||
# body-part
|
||||
self._fp.write(body_part)
|
||||
# close-delimiter transport-padding
|
||||
self.write(self._NL + '--' + boundary + '--')
|
||||
if msg.epilogue is not None:
|
||||
self.write(self._NL)
|
||||
if self._mangle_from_:
|
||||
epilogue = fcre.sub('>From ', msg.epilogue)
|
||||
else:
|
||||
epilogue = msg.epilogue
|
||||
self._write_lines(epilogue)
|
||||
|
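# Illustrative sketch (not part of the vendored source; it assumes the
# stdlib email.mime helpers, which this backport mirrors).  The handler
# above lays parts out as RFC 2046 describes: a dash-boundary before
# each part and a close-delimiter ending in "--":
#
#     from io import StringIO
#     from email.mime.multipart import MIMEMultipart
#     from email.mime.text import MIMEText
#     from email.generator import Generator
#
#     msg = MIMEMultipart(boundary='SEP')
#     msg.attach(MIMEText('first part\n'))
#     msg.attach(MIMEText('second part\n'))
#
#     out = StringIO()
#     Generator(out, mangle_from_=False).flatten(msg)
#     text = out.getvalue()
#     assert text.count('--SEP') == 3       # two delimiters + close-delimiter
#     assert text.endswith('--SEP--')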
||||
def _handle_multipart_signed(self, msg):
|
||||
# The contents of signed parts has to stay unmodified in order to keep
|
||||
# the signature intact per RFC1847 2.1, so we disable header wrapping.
|
||||
# RDM: This isn't enough to completely preserve the part, but it helps.
|
||||
p = self.policy
|
||||
self.policy = p.clone(max_line_length=0)
|
||||
try:
|
||||
self._handle_multipart(msg)
|
||||
finally:
|
||||
self.policy = p
|
||||
|
||||
def _handle_message_delivery_status(self, msg):
|
||||
# We can't just write the headers directly to self's file object
|
||||
# because this will leave an extra newline between the last header
|
||||
# block and the boundary. Sigh.
|
||||
blocks = []
|
||||
for part in msg.get_payload():
|
||||
s = self._new_buffer()
|
||||
g = self.clone(s)
|
||||
g.flatten(part, unixfrom=False, linesep=self._NL)
|
||||
text = s.getvalue()
|
||||
lines = text.split(self._encoded_NL)
|
||||
# Strip off the unnecessary trailing empty line
|
||||
if lines and lines[-1] == self._encoded_EMPTY:
|
||||
blocks.append(self._encoded_NL.join(lines[:-1]))
|
||||
else:
|
||||
blocks.append(text)
|
||||
# Now join all the blocks with an empty line. This has the lovely
|
||||
# effect of separating each block with an empty line, but not adding
|
||||
# an extra one after the last one.
|
||||
self._fp.write(self._encoded_NL.join(blocks))
|
||||
|
||||
def _handle_message(self, msg):
|
||||
s = self._new_buffer()
|
||||
g = self.clone(s)
|
||||
# The payload of a message/rfc822 part should be a multipart sequence
|
||||
# of length 1. The zeroth element of the list should be the Message
|
||||
# object for the subpart. Extract that object, stringify it, and
|
||||
# write it out.
|
||||
# Except, it turns out, when it's a string instead, which happens when
|
||||
# and only when HeaderParser is used on a message of mime type
|
||||
# message/rfc822. Such messages are generated by, for example,
|
||||
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
|
||||
# in that case we just emit the string body.
|
||||
payload = msg._payload
|
||||
if isinstance(payload, list):
|
||||
g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
|
||||
payload = s.getvalue()
|
||||
else:
|
||||
payload = self._encode(payload)
|
||||
self._fp.write(payload)
|
||||
|
||||
# This used to be a module level function; we use a classmethod for this
|
||||
# and _compile_re so we can continue to provide the module level function
|
||||
# for backward compatibility by doing
|
||||
# _make_boundary = Generator._make_boundary
|
||||
# at the end of the module. It *is* internal, so we could drop that...
|
||||
@classmethod
|
||||
def _make_boundary(cls, text=None):
|
||||
# Craft a random boundary. If text is given, ensure that the chosen
|
||||
# boundary doesn't appear in the text.
|
||||
token = random.randrange(sys.maxsize)
|
||||
boundary = ('=' * 15) + (_fmt % token) + '=='
|
||||
if text is None:
|
||||
return boundary
|
||||
b = boundary
|
||||
counter = 0
|
||||
while True:
|
||||
cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
|
||||
if not cre.search(text):
|
||||
break
|
||||
b = boundary + '.' + str(counter)
|
||||
counter += 1
|
||||
return b
|
||||
|
||||
@classmethod
|
||||
def _compile_re(cls, s, flags):
|
||||
return re.compile(s, flags)
|
||||
|
||||
class BytesGenerator(Generator):
|
||||
"""Generates a bytes version of a Message object tree.
|
||||
|
||||
Functionally identical to the base Generator except that the output is
|
||||
bytes and not string. When surrogates were used in the input to encode
|
||||
bytes, these are decoded back to bytes for output. If the policy has
|
||||
cte_type set to 7bit, then the message is transformed such that the
|
||||
non-ASCII bytes are properly content transfer encoded, using the charset
|
||||
unknown-8bit.
|
||||
|
||||
The outfp object must accept bytes in its write method.
|
||||
"""
|
||||
|
||||
# Bytes versions of this constant for use in manipulating data from
|
||||
# the BytesIO buffer.
|
||||
_encoded_EMPTY = b''
|
||||
|
||||
def write(self, s):
|
||||
self._fp.write(str(s).encode('ascii', 'surrogateescape'))
|
||||
|
||||
def _new_buffer(self):
|
||||
return BytesIO()
|
||||
|
||||
def _encode(self, s):
|
||||
return s.encode('ascii')
|
||||
|
||||
def _write_headers(self, msg):
|
||||
# This is almost the same as the string version, except for handling
|
||||
# strings with 8bit bytes.
|
||||
for h, v in msg.raw_items():
|
||||
self._fp.write(self.policy.fold_binary(h, v))
|
||||
# A blank line always separates headers from body
|
||||
self.write(self._NL)
|
||||
|
||||
def _handle_text(self, msg):
|
||||
# If the string has surrogates the original source was bytes, so
|
||||
# just write it back out.
|
||||
if msg._payload is None:
|
||||
return
|
||||
if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit':
|
||||
if self._mangle_from_:
|
||||
msg._payload = fcre.sub(">From ", msg._payload)
|
||||
self._write_lines(msg._payload)
|
||||
else:
|
||||
super(BytesGenerator,self)._handle_text(msg)
|
||||
|
||||
# Default body handler
|
||||
_writeBody = _handle_text
|
||||
|
||||
@classmethod
|
||||
def _compile_re(cls, s, flags):
|
||||
return re.compile(s.encode('ascii'), flags)
|
||||
|
||||
|
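# Illustrative sketch (not part of the vendored source; it assumes the
# stdlib email API, which this backport mirrors).  BytesGenerator writes
# bytes and re-encodes surrogate-escaped characters back to the original
# raw bytes, so non-ASCII bytes in headers are reproduced on output:
#
#     from io import BytesIO
#     from email.parser import BytesFeedParser
#     from email.generator import BytesGenerator
#
#     raw = b'Subject: caf\xc3\xa9\r\n\r\nbody\r\n'
#     p = BytesFeedParser()
#     p.feed(raw)
#     msg = p.close()
#
#     out = BytesIO()
#     BytesGenerator(out, mangle_from_=False).flatten(msg)
#     assert b'caf\xc3\xa9' in out.getvalue()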
||||
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
|
||||
|
||||
class DecodedGenerator(Generator):
|
||||
"""Generates a text representation of a message.
|
||||
|
||||
Like the Generator base class, except that non-text parts are substituted
|
||||
with a format string representing the part.
|
||||
"""
|
||||
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
|
||||
"""Like Generator.__init__() except that an additional optional
|
||||
argument is allowed.
|
||||
|
||||
Walks through all subparts of a message. If the subpart is of main
|
||||
type `text', then it prints the decoded payload of the subpart.
|
||||
|
||||
Otherwise, fmt is a format string that is used instead of the message
|
||||
payload. fmt is expanded with the following keywords (in
|
||||
%(keyword)s format):
|
||||
|
||||
type : Full MIME type of the non-text part
|
||||
maintype : Main MIME type of the non-text part
|
||||
subtype : Sub-MIME type of the non-text part
|
||||
filename : Filename of the non-text part
|
||||
description: Description associated with the non-text part
|
||||
encoding : Content transfer encoding of the non-text part
|
||||
|
||||
The default value for fmt is None, meaning
|
||||
|
||||
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
|
||||
"""
|
||||
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
|
||||
if fmt is None:
|
||||
self._fmt = _FMT
|
||||
else:
|
||||
self._fmt = fmt
|
||||
|
||||
def _dispatch(self, msg):
|
||||
for part in msg.walk():
|
||||
maintype = part.get_content_maintype()
|
||||
if maintype == 'text':
|
||||
print(part.get_payload(decode=False), file=self)
|
||||
elif maintype == 'multipart':
|
||||
# Just skip this
|
||||
pass
|
||||
else:
|
||||
print(self._fmt % {
|
||||
'type' : part.get_content_type(),
|
||||
'maintype' : part.get_content_maintype(),
|
||||
'subtype' : part.get_content_subtype(),
|
||||
'filename' : part.get_filename('[no filename]'),
|
||||
'description': part.get('Content-Description',
|
||||
'[no description]'),
|
||||
'encoding' : part.get('Content-Transfer-Encoding',
|
||||
'[no encoding]'),
|
||||
}, file=self)
|
||||
|
||||
|
||||
# Helper used by Generator._make_boundary
|
||||
_width = len(repr(sys.maxsize-1))
|
||||
_fmt = '%%0%dd' % _width
|
||||
|
||||
# Backward compatibility
|
||||
_make_boundary = Generator._make_boundary
|
||||
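# Illustrative usage sketch (not part of the vendored source; it assumes
# the stdlib email API, which this backport mirrors).  DecodedGenerator
# prints the payload of text parts and substitutes the fmt template for
# every non-text part:
#
#     from io import StringIO
#     from email.mime.multipart import MIMEMultipart
#     from email.mime.text import MIMEText
#     from email.mime.application import MIMEApplication
#     from email.generator import DecodedGenerator
#
#     msg = MIMEMultipart()
#     msg.attach(MIMEText('readable text'))
#     msg.attach(MIMEApplication(b'\x00\x01'))
#
#     out = StringIO()
#     DecodedGenerator(out, fmt='[skipped %(type)s]').flatten(msg)
#     assert 'readable text' in out.getvalue()
#     assert '[skipped application/octet-stream]' in out.getvalue()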
Some files were not shown because too many files have changed in this diff.