mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-26 05:42:15 +01:00

nuke integration wip

This commit is contained in:
parent bb17ef7221
commit 01f01997b8

27 changed files with 1541 additions and 408 deletions

@@ -23,9 +23,12 @@ def install():
     pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

     # Disable all families except for the ones we explicitly want to see
-    family_states = ["studio.imagesequence",
-                     "studio.camera",
-                     "studio.pointcache"]
+    family_states = [
+        "studio.imagesequence",
+        "studio.camera",
+        "studio.pointcache",
+        "studio.workfile"
+    ]

     avalon.data["familiesStateDefault"] = False
     avalon.data["familiesStateToggled"] = family_states

@@ -3,46 +3,40 @@ import sys

 from avalon.vendor.Qt import QtGui
 import avalon.nuke

+import nuke

 self = sys.modules[__name__]
 self._project = None


-def update_frame_range(start, end, root=None, set_render_range=True):
-    """Set Fusion comp's start and end frame range
+def update_frame_range(start, end, root=None):
+    """Set Nuke script start and end frame range

     Args:
         start (float, int): start frame
         end (float, int): end frame
-        comp (object, Optional): comp object from fusion
-        set_render_range (bool, Optional): When True this will also set the
-            composition's render start and end frame.
+        root (object, Optional): root object from nuke's script

     Returns:
         None

     """

+    if not root:
+        root, nodes = avalon.nuke.get_current_comp()

     knobs = {
-        "COMPN_GlobalStart": start,
-        "COMPN_GlobalEnd": end
+        "first_frame": start,
+        "last_frame": end
     }

-    if set_render_range:
-        knobs.update({
-            "COMPN_RenderStart": start,
-            "COMPN_RenderEnd": end
-        })

-    with avalon.nuke.comp_lock_and_undo_chunk():
-        comp.SetAttrs(attrs)
+    with avalon.nuke.viewer_update_and_undo_stop():
+        for key, value in knobs.items():
+            if root:
+                root[key].setValue(value)
+            else:
+                nuke.root()[key].setValue(value)
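
For orientation, a quick usage sketch of the reworked function. This assumes a live Nuke session and that the module is importable as config.nuke.lib (the loader plugins later in this commit import it that way):

    import nuke
    from config.nuke import lib

    # With no explicit root the function falls back to nuke.root().
    lib.update_frame_range(1001, 1100)
    assert nuke.root()["first_frame"].value() == 1001
    assert nuke.root()["last_frame"].value() == 1100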


 def get_additional_data(container):
-    """Get Fusion related data for the container
+    """Get Nuke related data for the container

     Args:
         container(dict): the container found by the ls() function
@@ -51,11 +45,16 @@ def get_additional_data(container):
         dict
     """

-    tool = container["_tool"]
-    tile_color = tool.TileColor
+    node = container["_tool"]
+    tile_color = node['tile_color'].value()
     if tile_color is None:
         return {}

-    return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
-                                           tile_color["G"],
-                                           tile_color["B"])}
+    hex = '%08x' % tile_color
+    rgba = [
+        float(int(hex[0:2], 16)) / 255.0,
+        float(int(hex[2:4], 16)) / 255.0,
+        float(int(hex[4:6], 16)) / 255.0
+    ]
+
+    return {"color": QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
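
For context: Nuke's tile_color knob stores a packed 0xRRGGBBAA integer, which is why the hex slicing above recovers the channels. A standalone sketch of the round trip (values illustrative):

    tile_color = 0xff0000ff                        # pure red, full alpha
    hex_str = '%08x' % tile_color                  # 'ff0000ff'
    rgba = [int(hex_str[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]
    assert rgba == [1.0, 0.0, 0.0]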

@@ -1,10 +1,10 @@
 from avalon import api, style
 from avalon.vendor.Qt import QtGui, QtWidgets

-import avalon.fusion
+import avalon.nuke


-class FusionSetToolColor(api.InventoryAction):
+class NukeSetToolColor(api.InventoryAction):
     """Update the color of the selected tools"""

     label = "Set Tool Color"

@@ -16,15 +16,20 @@ class FusionSetToolColor(api.InventoryAction):
         """Color all selected tools the selected colors"""

         result = []
-        comp = avalon.fusion.get_current_comp()

         # Get tool color
         first = containers[0]
-        tool = first["_tool"]
-        color = tool.TileColor
+        node = first["_tool"]
+        color = node["tile_color"].value()
+        hex = '%08x' % color
+        rgba = [
+            float(int(hex[0:2], 16)) / 255.0,
+            float(int(hex[2:4], 16)) / 255.0,
+            float(int(hex[4:6], 16)) / 255.0
+        ]

         if color is not None:
-            qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
+            qcolor = QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])
         else:
             qcolor = self._fallback_color

@@ -33,15 +38,21 @@ class FusionSetToolColor(api.InventoryAction):
         if not picked_color:
             return

-        with avalon.fusion.comp_lock_and_undo_chunk(comp):
+        with avalon.nuke.viewer_update_and_undo_stop():
             for container in containers:
                 # Convert color to RGB 0-1 floats
                 rgb_f = picked_color.getRgbF()
-                rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
+                hexColour = int(
+                    '%02x%02x%02x%02x' % (
+                        int(rgb_f[0] * 255),
+                        int(rgb_f[1] * 255),
+                        int(rgb_f[2] * 255),
+                        1),
+                    16
+                )
                 # Update tool
-                tool = container["_tool"]
-                tool.TileColor = rgb_f_table
+                node = container["_tool"]
+                node['tile_color'].setValue(hexColour)

                 result.append(container)
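
And the inverse packing direction used by the action above, standalone. Note the alpha byte: 255 is used here for a fully opaque example, while the committed code writes 1, which renders nearly transparent:

    rgb_f = (1.0, 0.5, 0.0)
    packed = int('%02x%02x%02x%02x' % (
        int(rgb_f[0] * 255), int(rgb_f[1] * 255), int(rgb_f[2] * 255), 255), 16)
    assert packed == 0xff7f00ff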
@@ -34,8 +34,12 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):

     order = pyblish.api.CollectorOrder + 0.499
     label = "Deadline User"
-    hosts = ['maya', 'fusion']
-    families = ["studio.renderlayer", "studio.saver.deadline"]
+    hosts = ['maya', 'fusion', 'nuke']
+    families = [
+        "studio.renderlayer",
+        "studio.saver.deadline",
+        "studio.imagesequence"
+    ]

     def process(self, context):
         """Inject the current working file"""

@@ -49,4 +53,3 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):

         self.log.info("Found Deadline user: {}".format(user))
         context.data['deadlineUser'] = user
-
@@ -88,6 +88,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder
+    targets = ["filesequence"]
     label = "File Sequences"
     hosts = ['maya']

     def process(self, context):

126 config/plugins/global/publish/collect_json.py Normal file

@@ -0,0 +1,126 @@
import os
import json
import re

import pyblish.api
from config.vendor import clique


class CollectJSON(pyblish.api.ContextPlugin):
    """ Collecting the json files in current directory. """

    label = "JSON"
    order = pyblish.api.CollectorOrder

    def version_get(self, string, prefix):
        """ Extract version information from filenames. Code from Foundry's
        nukescripts.version_get()
        """

        regex = r"[/_.]{}\d+".format(prefix)
        matches = re.findall(regex, string, re.IGNORECASE)

        if not len(matches):
            msg = "No '_{}#' found in '{}'".format(prefix, string)
            raise ValueError(msg)
        return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()

    def process(self, context):
        current_file = context.data("currentFile")
        # Skip if current file is not a directory
        if not os.path.isdir(current_file):
            return

        # Traverse directory and collect collections from json files.
        instances = []
        for root, dirs, files in os.walk(current_file):
            for f in files:
                if f.endswith(".json"):
                    with open(os.path.join(root, f)) as json_data:
                        for data in json.load(json_data):
                            instances.append(data)

        # Validate instance based on supported families.
        valid_families = ["img", "cache", "scene", "mov"]
        valid_data = []
        for data in instances:
            families = data.get("families", []) + [data["family"]]
            family_type = list(set(families) & set(valid_families))
            if family_type:
                valid_data.append(data)

        # Create existing output instance.
        scanned_dirs = []
        files = []
        collections = []
        for data in valid_data:
            if "collection" not in data.keys():
                continue
            if data["collection"] is None:
                continue

            instance_collection = clique.parse(data["collection"])

            try:
                version = self.version_get(
                    os.path.basename(instance_collection.format()), "v"
                )[1]
            except ValueError:
                # Ignore any output that is not versioned
                continue

            # Getting collections of all previous versions and current version
            for count in range(1, int(version) + 1):

                # Generate collection
                version_string = "v" + str(count).zfill(len(version))
                head = instance_collection.head.replace(
                    "v" + version, version_string
                )
                collection = clique.Collection(
                    head=head.replace("\\", "/"),
                    padding=instance_collection.padding,
                    tail=instance_collection.tail
                )
                collection.version = count

                # Scan collection directory
                scan_dir = os.path.dirname(collection.head)
                if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
                    for f in os.listdir(scan_dir):
                        file_path = os.path.join(scan_dir, f)
                        files.append(file_path.replace("\\", "/"))
                    scanned_dirs.append(scan_dir)

                # Match files to collection and add
                for f in files:
                    if collection.match(f):
                        collection.add(f)

                # Skip if no files were found in the collection
                if not list(collection):
                    continue

                # Skip existing collections
                if collection in collections:
                    continue

                instance = context.create_instance(name=data["name"])
                version = self.version_get(
                    os.path.basename(collection.format()), "v"
                )[1]

                basename = os.path.basename(collection.format())
                instance.data["label"] = "{0} - {1}".format(
                    data["name"], basename
                )

                families = data["families"] + [data["family"]]
                family = list(set(valid_families) & set(families))[0]
                instance.data["family"] = family
                instance.data["families"] = ["output"]
                instance.data["collection"] = collection
                instance.data["version"] = int(version)
                instance.data["publish"] = False

                collections.append(collection)
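
To illustrate the Foundry-style parser above with a hypothetical filename stem:

    import re

    string = "shot010_comp_v012"
    matches = re.findall(r"[/_.]v\d+", string, re.IGNORECASE)
    # matches == ['_v012']; version_get would return ('v', '012')
    assert (matches[-1][1],
            re.search(r"\d+", matches[-1]).group()) == ("v", "012")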

49 config/plugins/global/publish/extract_json.py Normal file

@@ -0,0 +1,49 @@
import os
import json
import datetime
import time

import pyblish.api
from config.vendor import clique


class ExtractJSON(pyblish.api.ContextPlugin):
    """ Extract all instances to a serialized json file. """

    order = pyblish.api.IntegratorOrder
    label = "JSON"

    def process(self, context):

        workspace = os.path.join(
            os.path.dirname(context.data["currentFile"]), "workspace",
            "instances")

        if not os.path.exists(workspace):
            os.makedirs(workspace)

        output_data = []
        for instance in context:

            data = {}
            for key, value in instance.data.iteritems():
                if isinstance(value, clique.Collection):
                    value = value.format()

                try:
                    json.dumps(value)
                    data[key] = value
                except TypeError:
                    msg = "\"{0}\"".format(value)
                    msg += " in instance.data[\"{0}\"]".format(key)
                    msg += " could not be serialized."
                    self.log.debug(msg)

            output_data.append(data)

        timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime("%Y%m%d-%H%M%S")
        filename = timestamp + "_instances.json"

        with open(os.path.join(workspace, filename), "w") as outfile:
            outfile.write(json.dumps(output_data, indent=4, sort_keys=True))
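
The try/except around json.dumps above is a standard probe for dropping values that cannot be serialized; a minimal standalone form:

    import json

    data = {"frames": [1, 2, 3], "node": object()}  # object() is not JSON-safe
    safe = {}
    for key, value in data.items():
        try:
            json.dumps(value)
            safe[key] = value
        except TypeError:
            pass  # skip values json cannot encode
    assert "node" not in safe and "frames" in safe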
@@ -122,8 +122,12 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):

     label = "Submit image sequence jobs to Deadline"
     order = pyblish.api.IntegratorOrder + 0.1
-    hosts = ["fusion", "maya"]
-    families = ["studio.saver.deadline", "studio.renderlayer"]
+    hosts = ["fusion", "maya", "nuke"]
+    families = [
+        "studio.saver.deadline",
+        "studio.renderlayer",
+        "studio.imagesequence"
+    ]

     def process(self, instance):

@@ -6,7 +6,7 @@ class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):

     label = "Validate File Saved"
     order = pyblish.api.ValidatorOrder - 0.1
-    hosts = ["maya", "houdini"]
+    hosts = ["maya", "houdini", "nuke"]

     def process(self, context):

@@ -1,7 +1,7 @@
 import os
 import sys
 import avalon.api
 import avalon.nuke
 import nuke


 class CrateWriteExr(avalon.api.Creator):

@@ -16,10 +16,10 @@ class CrateWriteExr(avalon.api.Creator):
     # self.data.setdefault("subset", "this")

     def process(self):
-        nuke = getattr(sys.modules["__main__"], "nuke", None)
+        # nuke = getattr(sys.modules["__main__"], "nuke", None)
         data = {}
         ext = "exr"
-        root, nodes = avalon.nuke.get_current_script(nuke=nuke)
+        root, nodes = avalon.nuke.get_current_script()

         # todo: improve method of getting current environment
         # todo: pref avalon.Session over os.environ

@@ -34,7 +34,7 @@ class CrateWriteExr(avalon.api.Creator):
             filename
         ).replace("\\", "/")

-        with avalon.nuke.viewer_update_and_undo_stop(nuke):
+        with avalon.nuke.viewer_update_and_undo_stop():
             w = nuke.createNode(
                 "Write",
                 "name {}".format(self.name))

@@ -1,25 +1,21 @@
-# from avalon import api
-#
-#
-# class FusionSelectContainers(api.InventoryAction):
-#
-#     label = "Select Containers"
-#     icon = "mouse-pointer"
-#     color = "#d8d8d8"
-#
-#     def process(self, containers):
-#
-#         import avalon.fusion
-#
-#         tools = [i["_tool"] for i in containers]
-#
-#         comp = avalon.fusion.get_current_comp()
-#         flow = comp.CurrentFrame.FlowView
-#
-#         with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
-#             # Clear selection
-#             flow.Select()
-#
-#             # Select tool
-#             for tool in tools:
-#                 flow.Select(tool)
+from avalon import api
+
+
+class NukeSelectContainers(api.InventoryAction):
+
+    label = "Select Containers"
+    icon = "mouse-pointer"
+    color = "#d8d8d8"
+
+    def process(self, containers):
+
+        import avalon.nuke
+
+        nodes = [i["_tool"] for i in containers]
+
+        with avalon.nuke.viewer_update_and_undo_stop():
+            # Clear previous selection
+            [n['selected'].setValue(False) for n in nodes]
+            # Select tool
+            for node in nodes:
+                node["selected"].setValue(True)

@@ -1,76 +1,76 @@
-# """A module containing generic loader actions that will display in the Loader.
-#
-# """
-#
-# from avalon import api
-#
-#
-# class FusionSetFrameRangeLoader(api.Loader):
-#     """Specific loader of Alembic for the avalon.animation family"""
-#
-#     families = ["studio.animation",
-#                 "studio.camera",
-#                 "studio.imagesequence",
-#                 "studio.yeticache",
-#                 "studio.pointcache"]
-#     representations = ["*"]
-#
-#     label = "Set frame range"
-#     order = 11
-#     icon = "clock-o"
-#     color = "white"
-#
-#     def load(self, context, name, namespace, data):
-#
-#         from config.fusion import lib
-#
-#         version = context['version']
-#         version_data = version.get("data", {})
-#
-#         start = version_data.get("startFrame", None)
-#         end = version_data.get("endFrame", None)
-#
-#         if start is None or end is None:
-#             print("Skipping setting frame range because start or "
-#                   "end frame data is missing..")
-#             return
-#
-#         lib.update_frame_range(start, end)
-#
-#
-# class FusionSetFrameRangeWithHandlesLoader(api.Loader):
-#     """Specific loader of Alembic for the avalon.animation family"""
-#
-#     families = ["studio.animation",
-#                 "studio.camera",
-#                 "studio.imagesequence",
-#                 "studio.yeticache",
-#                 "studio.pointcache"]
-#     representations = ["*"]
-#
-#     label = "Set frame range (with handles)"
-#     order = 12
-#     icon = "clock-o"
-#     color = "white"
-#
-#     def load(self, context, name, namespace, data):
-#
-#         from config.fusion import lib
-#
-#         version = context['version']
-#         version_data = version.get("data", {})
-#
-#         start = version_data.get("startFrame", None)
-#         end = version_data.get("endFrame", None)
-#
-#         if start is None or end is None:
-#             print("Skipping setting frame range because start or "
-#                   "end frame data is missing..")
-#             return
-#
-#         # Include handles
-#         handles = version_data.get("handles", 0)
-#         start -= handles
-#         end += handles
-#
-#         lib.update_frame_range(start, end)
+"""A module containing generic loader actions that will display in the Loader.
+
+"""
+
+from avalon import api
+
+
+class NukeSetFrameRangeLoader(api.Loader):
+    """Specific loader of Alembic for the avalon.animation family"""
+
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.imagesequence",
+                "studio.yeticache",
+                "studio.pointcache"]
+    representations = ["*"]
+
+    label = "Set frame range"
+    order = 11
+    icon = "clock-o"
+    color = "white"
+
+    def load(self, context, name, namespace, data):
+
+        from config.nuke import lib
+
+        version = context['version']
+        version_data = version.get("data", {})
+
+        start = version_data.get("startFrame", None)
+        end = version_data.get("endFrame", None)
+
+        if start is None or end is None:
+            print("Skipping setting frame range because start or "
+                  "end frame data is missing..")
+            return
+
+        lib.update_frame_range(start, end)
+
+
+class NukeSetFrameRangeWithHandlesLoader(api.Loader):
+    """Specific loader of Alembic for the avalon.animation family"""
+
+    families = ["studio.animation",
+                "studio.camera",
+                "studio.imagesequence",
+                "studio.yeticache",
+                "studio.pointcache"]
+    representations = ["*"]
+
+    label = "Set frame range (with handles)"
+    order = 12
+    icon = "clock-o"
+    color = "white"
+
+    def load(self, context, name, namespace, data):
+
+        from config.nuke import lib
+
+        version = context['version']
+        version_data = version.get("data", {})
+
+        start = version_data.get("startFrame", None)
+        end = version_data.get("endFrame", None)
+
+        if start is None or end is None:
+            print("Skipping setting frame range because start or "
+                  "end frame data is missing..")
+            return
+
+        # Include handles
+        handles = version_data.get("handles", 0)
+        start -= handles
+        end += handles
+
+        lib.update_frame_range(start, end)

@@ -1,259 +1,252 @@
-# import os
-# import contextlib
-#
-# from avalon import api
-# import avalon.io as io
-#
-#
-# @contextlib.contextmanager
-# def preserve_inputs(tool, inputs):
-#     """Preserve the tool's inputs after context"""
-#
-#     comp = tool.Comp()
-#
-#     values = {}
-#     for name in inputs:
-#         tool_input = getattr(tool, name)
-#         value = tool_input[comp.TIME_UNDEFINED]
-#         values[name] = value
-#
-#     try:
-#         yield
-#     finally:
-#         for name, value in values.items():
-#             tool_input = getattr(tool, name)
-#             tool_input[comp.TIME_UNDEFINED] = value
-#
-#
-# @contextlib.contextmanager
-# def preserve_trim(loader, log=None):
-#     """Preserve the relative trim of the Loader tool.
-#
-#     This tries to preserve the loader's trim (trim in and trim out) after
-#     the context by reapplying the "amount" it trims on the clip's length at
-#     start and end.
-#
-#     """
-#
-#     # Get original trim as amount of "trimming" from length
-#     time = loader.Comp().TIME_UNDEFINED
-#     length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
-#     trim_from_start = loader["ClipTimeStart"][time]
-#     trim_from_end = length - loader["ClipTimeEnd"][time]
-#
-#     try:
-#         yield
-#     finally:
-#
-#         length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
-#         if trim_from_start > length:
-#             trim_from_start = length
-#             if log:
-#                 log.warning("Reducing trim in to %d "
-#                             "(because of less frames)" % trim_from_start)
-#
-#         remainder = length - trim_from_start
-#         if trim_from_end > remainder:
-#             trim_from_end = remainder
-#             if log:
-#                 log.warning("Reducing trim in to %d "
-#                             "(because of less frames)" % trim_from_end)
-#
-#         loader["ClipTimeStart"][time] = trim_from_start
-#         loader["ClipTimeEnd"][time] = length - trim_from_end
-#
-#
-# def loader_shift(loader, frame, relative=True):
-#     """Shift global in time by i preserving duration
-#
-#     This moves the loader by i frames preserving global duration. When relative
-#     is False it will shift the global in to the start frame.
-#
-#     Args:
-#         loader (tool): The fusion loader tool.
-#         frame (int): The amount of frames to move.
-#         relative (bool): When True the shift is relative, else the shift will
-#             change the global in to frame.
-#
-#     Returns:
-#         int: The resulting relative frame change (how much it moved)
-#
-#     """
-#     comp = loader.Comp()
-#     time = comp.TIME_UNDEFINED
-#
-#     old_in = loader["GlobalIn"][time]
-#     old_out = loader["GlobalOut"][time]
-#
-#     if relative:
-#         shift = frame
-#     else:
-#         shift = frame - old_in
-#
-#     # Shifting global in will try to automatically compensate for the change
-#     # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
-#     # input values to "just shift" the clip
-#     with preserve_inputs(loader, inputs=["ClipTimeStart",
-#                                          "ClipTimeEnd",
-#                                          "HoldFirstFrame",
-#                                          "HoldLastFrame"]):
-#
-#         # GlobalIn cannot be set past GlobalOut or vice versa
-#         # so we must apply them in the order of the shift.
-#         if shift > 0:
-#             loader["GlobalOut"][time] = old_out + shift
-#             loader["GlobalIn"][time] = old_in + shift
-#         else:
-#             loader["GlobalIn"][time] = old_in + shift
-#             loader["GlobalOut"][time] = old_out + shift
-#
-#     return int(shift)
-#
-#
-# class FusionLoadSequence(api.Loader):
-#     """Load image sequence into Fusion"""
-#
-#     families = ["studio.imagesequence"]
-#     representations = ["*"]
-#
-#     label = "Load sequence"
-#     order = -10
-#     icon = "code-fork"
-#     color = "orange"
-#
-#     def load(self, context, name, namespace, data):
-#
-#         from avalon.fusion import (
-#             imprint_container,
-#             get_current_comp,
-#             comp_lock_and_undo_chunk
-#         )
-#
-#         # Fallback to asset name when namespace is None
-#         if namespace is None:
-#             namespace = context['asset']['name']
-#
-#         # Use the first file for now
-#         path = self._get_first_image(self.fname)
-#
-#         # Create the Loader with the filename path set
-#         comp = get_current_comp()
-#         with comp_lock_and_undo_chunk(comp, "Create Loader"):
-#
-#             args = (-32768, -32768)
-#             tool = comp.AddTool("Loader", *args)
-#             tool["Clip"] = path
-#
-#             # Set global in point to start frame (if in version.data)
-#             start = context["version"]["data"].get("startFrame", None)
-#             if start is not None:
-#                 loader_shift(tool, start, relative=False)
-#
-#             imprint_container(tool,
-#                               name=name,
-#                               namespace=namespace,
-#                               context=context,
-#                               loader=self.__class__.__name__)
-#
-#     def switch(self, container, representation):
-#         self.update(container, representation)
-#
-#     def update(self, container, representation):
-#         """Update the Loader's path
-#
-#         Fusion automatically tries to reset some variables when changing
-#         the loader's path to a new file. These automatic changes are to its
-#         inputs:
-#             - ClipTimeStart: Fusion reset to 0 if duration changes
-#               - We keep the trim in as close as possible to the previous
-#                 value. When there are less frames than the amount of trim
-#                 we reduce it accordingly.
-#
-#             - ClipTimeEnd: Fusion reset to 0 if duration changes
-#               - We keep the trim out as close as possible to the previous
-#                 value within new amount of frames after trim in
-#                 (ClipTimeStart) has been set.
-#
-#             - GlobalIn: Fusion reset to comp's global in if duration changes
-#               - We change it to the "startFrame"
-#
-#             - GlobalEnd: Fusion resets to globalIn + length if duration changes
-#               - We do the same as Fusion - allow fusion to take control.
-#
-#             - HoldFirstFrame: Fusion resets this to 0
-#               - We preserve the value.
-#
-#             - HoldLastFrame: Fusion resets this to 0
-#               - We preserve the value.
-#
-#             - Reverse: Fusion resets to disabled if "Loop" is not enabled.
-#               - We preserve the value.
-#
-#             - Depth: Fusion resets to "Format"
-#               - We preserve the value.
-#
-#             - KeyCode: Fusion resets to ""
-#               - We preserve the value.
-#
-#             - TimeCodeOffset: Fusion resets to 0
-#               - We preserve the value.
-#
-#         """
-#
-#         from avalon.fusion import comp_lock_and_undo_chunk
-#
-#         tool = container["_tool"]
-#         assert tool.ID == "Loader", "Must be Loader"
-#         comp = tool.Comp()
-#
-#         root = api.get_representation_path(representation)
-#         path = self._get_first_image(root)
-#
-#         # Get start frame from version data
-#         version = io.find_one({"type": "version",
-#                                "_id": representation["parent"]})
-#         start = version["data"].get("startFrame")
-#         if start is None:
-#             self.log.warning("Missing start frame for updated version "
-#                              "assuming starts at frame 0 for: "
-#                              "{} ({})".format(tool.Name, representation))
-#             start = 0
-#
-#         with comp_lock_and_undo_chunk(comp, "Update Loader"):
-#
-#             # Update the loader's path whilst preserving some values
-#             with preserve_trim(tool, log=self.log):
-#                 with preserve_inputs(tool,
-#                                      inputs=("HoldFirstFrame",
-#                                              "HoldLastFrame",
-#                                              "Reverse",
-#                                              "Depth",
-#                                              "KeyCode",
-#                                              "TimeCodeOffset")):
-#                     tool["Clip"] = path
-#
-#             # Set the global in to the start frame of the sequence
-#             global_in_changed = loader_shift(tool, start, relative=False)
-#             if global_in_changed:
-#                 # Log this change to the user
-#                 self.log.debug("Changed '%s' global in: %d" % (tool.Name,
-#                                                                start))
-#
-#             # Update the imprinted representation
-#             tool.SetData("avalon.representation", str(representation["_id"]))
-#
-#     def remove(self, container):
-#
-#         from avalon.fusion import comp_lock_and_undo_chunk
-#
-#         tool = container["_tool"]
-#         assert tool.ID == "Loader", "Must be Loader"
-#         comp = tool.Comp()
-#
-#         with comp_lock_and_undo_chunk(comp, "Remove Loader"):
-#             tool.Delete()
-#
-#     def _get_first_image(self, root):
-#         """Get first file in representation root"""
-#         files = sorted(os.listdir(root))
-#         return os.path.join(root, files[0])
+import os
+import contextlib
+
+from avalon import api
+import avalon.io as io
+
+from avalon.nuke import log
+import nuke
+
+
+@contextlib.contextmanager
+def preserve_inputs(node, knobs):
+    """Preserve the node's knob values after the context"""
+
+    values = {}
+    for name in knobs:
+        try:
+            knob_value = node[name].value()
+            values[name] = knob_value
+        except ValueError:
+            log.warning("missing knob {} in node {}".format(
+                name, node['name'].value()))
+
+    try:
+        yield
+    finally:
+        for name, value in values.items():
+            node[name].setValue(value)
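
A minimal usage sketch for the context manager above (requires a live Nuke session; knob names as on a Read node):

    read = nuke.nodes.Read()
    with preserve_inputs(read, knobs=["first", "last"]):
        read["first"].setValue(1001)  # temporary change inside the context
    # On exit the recorded knob values are written back via setValue().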
+
+
+@contextlib.contextmanager
+def preserve_trim(node):
+    """Preserve the relative trim of the Loader tool.
+
+    This tries to preserve the loader's trim (trim in and trim out) after
+    the context by reapplying the "amount" it trims on the clip's length at
+    start and end.
+
+    """
+    # working script frame range
+    script_start = nuke.root()["first_frame"].value()
+
+    start_at_frame = None
+    offset_frame = None
+    if node['frame_mode'].value() == "start at":
+        start_at_frame = node['frame'].value()
+    if node['frame_mode'].value() == "offset":
+        offset_frame = node['frame'].value()
+
+    try:
+        yield
+    finally:
+        if start_at_frame:
+            node['frame_mode'].setValue("start at")
+            node['frame'].setValue(str(script_start))
+            log.info("start frame of reader was set to "
+                     "{}".format(script_start))
+
+        if offset_frame:
+            node['frame_mode'].setValue("offset")
+            node['frame'].setValue(str(script_start + offset_frame))
+            log.info("start frame of reader was set to "
+                     "{}".format(script_start))
+
+
+def loader_shift(node, frame, relative=True):
+    """Shift global in time by i preserving duration
+
+    This moves the loader by i frames preserving global duration. When
+    relative is False it will shift the global in to the start frame.
+
+    Args:
+        node (Node): The Nuke Read node.
+        frame (int): The amount of frames to move.
+        relative (bool): When True the shift is relative, else the shift will
+            change the global in to frame.
+
+    Returns:
+        int: The resulting relative frame change (how much it moved)
+
+    """
+    # working script frame range
+    script_start = nuke.root()["first_frame"].value()
+
+    start_at_frame = None
+    offset_frame = None
+    if node['frame_mode'].value() == "start at":
+        start_at_frame = node['frame'].value()
+    if node['frame_mode'].value() == "offset":
+        offset_frame = node['frame'].value()
+
+    if relative:
+        shift = frame
+    else:
+        if start_at_frame:
+            shift = frame
+        if offset_frame:
+            shift = frame + offset_frame
+
+    # Shifting global in will try to automatically compensate for the change
+    # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
+    # input values to "just shift" the clip
+    with preserve_inputs(node, knobs=["file",
+                                      "first",
+                                      "last",
+                                      "originfirst",
+                                      "originlast",
+                                      "frame_mode",
+                                      "frame"]):
+
+        # GlobalIn cannot be set past GlobalOut or vice versa
+        # so we must apply them in the order of the shift.
+        if start_at_frame:
+            node['frame_mode'].setValue("start at")
+            node['frame'].setValue(str(script_start + shift))
+        if offset_frame:
+            node['frame_mode'].setValue("offset")
+            node['frame'].setValue(str(shift))
+
+    return int(shift)
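
A hedged sketch of calling it (Read1 is a hypothetical existing node; requires a live Nuke session):

    read = nuke.toNode("Read1")
    shift = loader_shift(read, 1001, relative=False)
    # In "start at" mode the frame knob becomes str(script_start + shift);
    # in "offset" mode it becomes str(shift).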
+
+
+class NukeLoadSequence(api.Loader):
+    """Load image sequence into Nuke"""
+
+    families = ["studio.imagesequence"]
+    representations = ["*"]
+
+    label = "Load sequence"
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+
+        from avalon.nuke import (
+            containerise,
+            ls_img_sequence,
+            viewer_update_and_undo_stop
+        )
+
+        # Fallback to asset name when namespace is None
+        if namespace is None:
+            namespace = context['asset']['name']
+
+        # Use the first file for now
+        # TODO: fix path fname
+        file = ls_img_sequence(os.path.dirname(self.fname), one=True)
+
+        # Create the Loader with the filename path set
+        with viewer_update_and_undo_stop():
+            # TODO: it might be universal read to img/geo/camera
+            r = nuke.createNode(
+                "Read",
+                "name {}".format(self.name))  # TODO: does self.name exist?
+            r["file"].setValue(file['path'])
+            if len(file['frames']) == 1:
+                first = file['frames'][0][0]
+                last = file['frames'][0][1]
+                r["originfirst"].setValue(first)
+                r["first"].setValue(first)
+                r["originlast"].setValue(last)
+                r["last"].setValue(last)
+            else:
+                first = file['frames'][0][0]
+                last = file['frames'][-1][1]
+                r["originfirst"].setValue(first)
+                r["first"].setValue(first)
+                r["originlast"].setValue(last)
+                r["last"].setValue(last)
+                log.warning("Missing frames in image sequence")
+
+            # Set global in point to start frame (if in version.data)
+            start = context["version"]["data"].get("startFrame", None)
+            if start is not None:
+                loader_shift(r, start, relative=False)
+
+            containerise(r,
+                         name=name,
+                         namespace=namespace,
+                         context=context,
+                         loader=self.__class__.__name__)
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def update(self, container, representation):
+        """Update the Loader's path
+
+        Fusion automatically tries to reset some variables when changing
+        the loader's path to a new file. These automatic changes are to its
+        inputs:
+
+        """
+
+        from avalon.nuke import (
+            viewer_update_and_undo_stop,
+            ls_img_sequence,
+            update_container
+        )
+
+        node = container["_tool"]
+        # TODO: prepare also for other readers img/geo/camera
+        assert node.Class() == "Reader", "Must be Reader"
+
+        root = api.get_representation_path(representation)
+        file = ls_img_sequence(os.path.dirname(root), one=True)
+
+        # Get start frame from version data
+        version = io.find_one({"type": "version",
+                               "_id": representation["parent"]})
+        start = version["data"].get("startFrame")
+        if start is None:
+            log.warning("Missing start frame for updated version "
+                        "assuming starts at frame 0 for: "
+                        "{} ({})".format(node['name'].value(), representation))
+            start = 0
+
+        with viewer_update_and_undo_stop():
+
+            # Update the loader's path whilst preserving some values
+            with preserve_trim(node):
+                with preserve_inputs(node,
+                                     knobs=["file",
+                                            "first",
+                                            "last",
+                                            "originfirst",
+                                            "originlast",
+                                            "frame_mode",
+                                            "frame"]):
+                    node["file"].setValue(file["path"])
+
+            # Set the global in to the start frame of the sequence
+            global_in_changed = loader_shift(node, start, relative=False)
+            if global_in_changed:
+                # Log this change to the user
+                log.debug("Changed '{}' global in:"
+                          " {:d}".format(node['name'].value(), start))
+
+            # Update the imprinted representation
+            update_container(
+                node,
+                {"representation": str(representation["_id"])}
+            )
+
+    def remove(self, container):
+
+        from avalon.nuke import viewer_update_and_undo_stop
+
+        node = container["_tool"]
+        assert node.Class() == "Reader", "Must be Reader"
+
+        with viewer_update_and_undo_stop():
+            nuke.delete(node)

18 config/plugins/nuke/publish/collect_current_file.py Normal file

@@ -0,0 +1,18 @@
import pyblish.api


class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Current File"
    hosts = ["nuke"]
    families = ["studio.workfile"]

    def process(self, context):
        import os
        import nuke
        current_file = nuke.root().name()
        normalised = os.path.normpath(current_file)

        context.data["currentFile"] = normalised

17 config/plugins/nuke/publish/collect_framerate.py Normal file

@@ -0,0 +1,17 @@
import nuke

import pyblish.api


class CollectFramerate(pyblish.api.ContextPlugin):
    """Collect framerate."""

    order = pyblish.api.CollectorOrder
    label = "Collect Framerate"
    hosts = [
        "nuke",
        "nukeassist"
    ]

    def process(self, context):
        context.data["framerate"] = nuke.root()["fps"].getValue()

14 config/plugins/nuke/publish/collect_host.py Normal file

@@ -0,0 +1,14 @@
import pyblish.api


class CollectHost(pyblish.api.ContextPlugin):
    """Inject the host into context"""

    order = pyblish.api.CollectorOrder
    label = "Collect Host"
    hosts = ["nuke"]

    def process(self, context):
        import pyblish.api

        context.data["host"] = pyblish.api.current_host()

13 config/plugins/nuke/publish/collect_host_version.py Normal file

@@ -0,0 +1,13 @@
import pyblish.api


class CollectHostVersion(pyblish.api.ContextPlugin):
    """Inject the hosts version into context"""

    order = pyblish.api.CollectorOrder
    label = "Collect Host Version"
    hosts = ["nuke"]

    def process(self, context):
        import nuke
        context.data["hostVersion"] = nuke.NUKE_VERSION_STRING

14 config/plugins/nuke/publish/collect_selection.py Normal file

@@ -0,0 +1,14 @@
import nuke

import pyblish.api


class CollectSelection(pyblish.api.ContextPlugin):
    """Collect selection."""

    order = pyblish.api.CollectorOrder
    label = "Collect Selection of Nodes"
    hosts = ["nuke"]

    def process(self, context):
        context.data["selection"] = nuke.selectedNodes()

29 config/plugins/nuke/publish/extract_output_directory.py Normal file

@@ -0,0 +1,29 @@
import os

import pyblish.api


class ExtractOutputDirectory(pyblish.api.InstancePlugin):
    """Extracts the output path for any collection or single output_path."""

    order = pyblish.api.ExtractorOrder - 0.1
    label = "Output Directory"
    optional = True

    # targets = ["process"]

    def process(self, instance):

        path = None

        if "collection" in instance.data.keys():
            path = instance.data["collection"].format()

        if "output_path" in instance.data.keys():
            path = instance.data["output_path"]

        if not path:
            return

        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))

15 config/plugins/nuke/publish/extract_script_save.py Normal file

@@ -0,0 +1,15 @@
import nuke
import pyblish.api


class ExtractScriptSave(pyblish.api.InstancePlugin):
    """ Saves the script before extraction. """

    order = pyblish.api.ExtractorOrder - 0.49
    label = "Script Save"
    hosts = ["nuke"]
    families = ["studio.saver"]

    def process(self, instance):

        nuke.scriptSave()

33 config/plugins/nuke/publish/validate_proxy_mode.py Normal file

@@ -0,0 +1,33 @@
import nuke

import pyblish.api


class RepairNukeProxyModeAction(pyblish.api.Action):

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):

        nuke.root()["proxy"].setValue(0)


class ValidateNukeProxyMode(pyblish.api.ContextPlugin):
    """Validates against having proxy mode on."""

    order = pyblish.api.ValidatorOrder
    optional = True
    label = "Proxy Mode"
    actions = [RepairNukeProxyModeAction]
    hosts = ["nuke", "nukeassist"]
    # targets = ["default", "process"]

    def process(self, context):

        msg = (
            "Proxy mode is not supported. Please disable Proxy Mode in the "
            "Project settings."
        )
        assert not nuke.root()["proxy"].getValue(), msg

293 config/vendor/clique/__init__.py vendored Normal file

@@ -0,0 +1,293 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.

import re
from collections import defaultdict

from ._version import __version__
from .collection import Collection
from .error import CollectionError


#: Pattern for matching an index with optional padding.
DIGITS_PATTERN = '(?P<index>(?P<padding>0*)\d+)'

#: Common patterns that can be passed to :py:func:`~clique.assemble`.
PATTERNS = {
    'frames': '\.{0}\.\D+\d?$'.format(DIGITS_PATTERN),
    'versions': 'v{0}'.format(DIGITS_PATTERN)
}


def assemble(
    iterable, patterns=None, minimum_items=2, case_sensitive=True,
    assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discreet collections.

    *patterns* may be specified as a list of regular expressions to limit
    the returned collection possibilities. Use this when interested in
    collections that only match specific patterns. Each pattern must contain
    the expression from :py:data:`DIGITS_PATTERN` exactly once.

    A selection of common expressions are available in :py:data:`PATTERNS`.

    .. note::

        If a pattern is supplied as a string it will be automatically compiled
        to a :py:class:`re.RegexObject` instance for convenience.

    When *patterns* is not specified, collections are formed by examining all
    possible groupings of the items in *iterable* based around common
    numerical components.

    *minimum_items* dictates the minimum number of items a collection must
    have in order to be included in the result. The default is 2, filtering
    out single item collections.

    If *case_sensitive* is False, then items will be treated as part of the
    same collection when they only differ in casing. To avoid ambiguity, the
    resulting collection will always be lowercase. For example, "item.0001.dpx"
    and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".

    .. note::

        Any compiled *patterns* will also respect the set case sensitivity.

    For certain collections it may be ambiguous whether they are padded or
    not. For example, 1000-1010 can be considered either an unpadded
    collection or a four padded collection. By default, Clique is conservative
    and assumes that the collection is unpadded. To change this behaviour, set
    *assume_padded_when_ambiguous* to True and any ambiguous collection will
    have a relevant padding set.

    .. note::

        *assume_padded_when_ambiguous* has no effect on collections that are
        unambiguous. For example, 1-100 will always be considered unpadded
        regardless of the *assume_padded_when_ambiguous* setting.

    Return tuple of two lists (collections, remainder) where 'collections' is
    a list of assembled :py:class:`~clique.collection.Collection` instances
    and 'remainder' is a list of items that did not belong to any collection.

    '''
    collection_map = defaultdict(set)
    collections = []
    remainder = []

    # Compile patterns.
    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE

    compiled_patterns = []

    if patterns is not None:
        if not patterns:
            return collections, list(iterable)

        for pattern in patterns:
            if isinstance(pattern, basestring):
                compiled_patterns.append(re.compile(pattern, flags=flags))
            else:
                compiled_patterns.append(pattern)

    else:
        compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))

    # Process iterable.
    for item in iterable:
        matched = False

        for pattern in compiled_patterns:
            for match in pattern.finditer(item):
                index = match.group('index')

                head = item[:match.start('index')]
                tail = item[match.end('index'):]

                if not case_sensitive:
                    head = head.lower()
                    tail = tail.lower()

                padding = match.group('padding')
                if padding:
                    padding = len(index)
                else:
                    padding = 0

                key = (head, tail, padding)
                collection_map[key].add(int(index))
                matched = True

        if not matched:
            remainder.append(item)

    # Form collections.
    merge_candidates = []
    for (head, tail, padding), indexes in collection_map.items():
        collection = Collection(head, tail, padding, indexes)
        collections.append(collection)

        if collection.padding == 0:
            merge_candidates.append(collection)

    # Merge together collections that align on padding boundaries. For
    # example, 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that
    # only indexes within the padding width limit are merged. If a collection
    # is entirely merged into another then it will not be included as a
    # separate collection in the results.
    fully_merged = []
    for collection in collections:
        if collection.padding == 0:
            continue

        for candidate in merge_candidates:
            if (
                candidate.head == collection.head and
                candidate.tail == collection.tail
            ):
                merged_index_count = 0
                for index in candidate.indexes:
                    if len(str(abs(index))) == collection.padding:
                        collection.indexes.add(index)
                        merged_index_count += 1

                if merged_index_count == len(candidate.indexes):
                    fully_merged.append(candidate)

    # Filter out fully merged collections.
    collections = [collection for collection in collections
                   if collection not in fully_merged]

    # Filter out collections that do not have at least as many indexes as
    # minimum_items. In addition, add any members of a filtered collection,
    # which are not members of an unfiltered collection, to the remainder.
    filtered = []
    remainder_candidates = []
    for collection in collections:
        if len(collection.indexes) >= minimum_items:
            filtered.append(collection)
        else:
            for member in collection:
                remainder_candidates.append(member)

    for candidate in remainder_candidates:
        # Check if candidate has already been added to remainder to avoid
        # duplicate entries.
        if candidate in remainder:
            continue

        has_membership = False

        for collection in filtered:
            if candidate in collection:
                has_membership = True
                break

        if not has_membership:
            remainder.append(candidate)

    # Set padding for all ambiguous collections according to the
    # assume_padded_when_ambiguous setting.
    if assume_padded_when_ambiguous:
        for collection in filtered:
            if (
                not collection.padding and collection.indexes
            ):
                indexes = list(collection.indexes)
                first_index_width = len(str(indexes[0]))
                last_index_width = len(str(indexes[-1]))
                if first_index_width == last_index_width:
                    collection.padding = first_index_width

    return filtered, remainder
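
A doctest-style sketch of assemble(), matching the behaviour documented above (the module is vendored as config.vendor.clique in this repo):

    from config.vendor import clique

    collections, remainder = clique.assemble(
        ["file.0001.exr", "file.0002.exr", "notes.txt"])
    print(collections)   # [<Collection "file.%04d.exr [1-2]">]
    print(remainder)     # ['notes.txt']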


def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
    '''Parse *value* into a :py:class:`~clique.collection.Collection`.

    Use *pattern* to extract information from *value*. It may make use of the
    following keys:

    * *head* - Common leading part of the collection.
    * *tail* - Common trailing part of the collection.
    * *padding* - Padding value in ``%0d`` format.
    * *range* - Total range in the form ``start-end``.
    * *ranges* - Comma separated ranges of indexes.
    * *holes* - Comma separated ranges of missing indexes.

    .. note::

        *holes* only makes sense if *range* or *ranges* is also present.

    '''
    # Construct regular expression for given pattern.
    expressions = {
        'head': '(?P<head>.*)',
        'tail': '(?P<tail>.*)',
        'padding': '%(?P<padding>\d*)d',
        'range': '(?P<range>\d+-\d+)?',
        'ranges': '(?P<ranges>[\d ,\-]+)?',
        'holes': '(?P<holes>[\d ,\-]+)'
    }

    pattern_regex = re.escape(pattern)
    for key, expression in expressions.items():
        pattern_regex = pattern_regex.replace(
            '\{{{0}\}}'.format(key),
            expression
        )
    pattern_regex = '^{0}$'.format(pattern_regex)

    # Match pattern against value and use results to construct collection.
    match = re.search(pattern_regex, value)
    if match is None:
        raise ValueError('Value did not match pattern.')

    groups = match.groupdict()
    if 'padding' in groups and groups['padding']:
        groups['padding'] = int(groups['padding'])
    else:
        groups['padding'] = 0

    # Create collection and then add indexes.
    collection = Collection(
        groups.get('head', ''),
        groups.get('tail', ''),
        groups['padding']
    )

    if groups.get('range', None) is not None:
        start, end = map(int, groups['range'].split('-'))
        collection.indexes.update(range(start, end + 1))

    if groups.get('ranges', None) is not None:
        parts = [part.strip() for part in groups['ranges'].split(',')]
        for part in parts:
            index_range = list(map(int, part.split('-', 2)))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.add(index)
            else:
                # Single index.
                collection.indexes.add(index_range[0])

    if 'holes' in groups:
        parts = [part.strip() for part in groups['holes'].split(',')]
        for part in parts:
            index_range = map(int, part.split('-', 2))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.remove(index)
            else:
                # Single index.
                collection.indexes.remove(index_range[0])

    return collection
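
And the inverse direction with parse(), using the default pattern:

    collection = clique.parse("file.%04d.exr [1-5]")
    print(list(collection))
    # ['file.0001.exr', 'file.0002.exr', ..., 'file.0005.exr']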

2 config/vendor/clique/_version.py vendored Normal file

@@ -0,0 +1,2 @@
__version__ = '1.5.0'
385
config/vendor/clique/collection.py
vendored
Normal file
385
config/vendor/clique/collection.py
vendored
Normal file
|
|
@ -0,0 +1,385 @@
|
|||
# :coding: utf-8
|
||||
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
|
||||
# :license: See LICENSE.txt.
|
||||
|
||||
import re
|
||||
|
||||
import descriptor
|
||||
import error
|
||||
import sorted_set
|
||||
|
||||
|
||||
class Collection(object):
|
||||
'''Represent group of items that differ only by numerical component.'''
|
||||
|
||||
indexes = descriptor.Unsettable('indexes')
|
||||
|
||||
def __init__(self, head, tail, padding, indexes=None):
|
||||
'''Initialise collection.
|
||||
|
||||
*head* is the leading common part whilst *tail* is the trailing
|
||||
common part.
|
||||
|
||||
*padding* specifies the "width" of the numerical component. An index
|
||||
will be padded with zeros to fill this width. A *padding* of zero
|
||||
implies no padding and width may be any size so long as no leading
|
||||
zeros are present.
|
||||
|
||||
*indexes* can specify a set of numerical indexes to initially populate
|
||||
the collection with.
|
||||
|
||||
.. note::
|
||||
|
||||
After instantiation, the ``indexes`` attribute cannot be set to a
|
||||
new value using assignment::
|
||||
|
||||
>>> collection.indexes = [1, 2, 3]
|
||||
AttributeError: Cannot set attribute defined as unsettable.
|
||||
|
||||
Instead, manipulate it directly::
|
||||
|
||||
>>> collection.indexes.clear()
|
||||
>>> collection.indexes.update([1, 2, 3])
|
||||
|
||||
'''
|
||||
super(Collection, self).__init__()
|
||||
self.__dict__['indexes'] = sorted_set.SortedSet()
|
||||
self._head = head
|
||||
self._tail = tail
|
||||
self.padding = padding
|
||||
self._update_expression()
|
||||
|
||||
if indexes is not None:
|
||||
self.indexes.update(indexes)

    @property
    def head(self):
        '''Return common leading part.'''
        return self._head

    @head.setter
    def head(self, value):
        '''Set common leading part to *value*.'''
        self._head = value
        self._update_expression()

    @property
    def tail(self):
        '''Return common trailing part.'''
        return self._tail

    @tail.setter
    def tail(self, value):
        '''Set common trailing part to *value*.'''
        self._tail = value
        self._update_expression()

    def _update_expression(self):
        '''Update internal expression.'''
        # Use a raw string so that '\d' is a regex token rather than an
        # (invalid) Python string escape.
        self._expression = re.compile(
            r'^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
            .format(re.escape(self.head), re.escape(self.tail))
        )

    def __str__(self):
        '''Return string representation.'''
        return self.format()

    def __repr__(self):
        '''Return representation.'''
        return '<{0} "{1}">'.format(self.__class__.__name__, self)

    def __iter__(self):
        '''Return iterator over items in collection.'''
        for index in self.indexes:
            formatted_index = '{0:0{1}d}'.format(index, self.padding)
            item = '{0}{1}{2}'.format(self.head, formatted_index, self.tail)
            yield item

    def __contains__(self, item):
        '''Return whether *item* is present in collection.'''
        match = self.match(item)
        if not match:
            return False

        if int(match.group('index')) not in self.indexes:
            return False

        return True

    def __eq__(self, other):
        '''Return whether *other* collection is equal.'''
        if not isinstance(other, Collection):
            return NotImplemented

        return all([
            other.head == self.head,
            other.tail == self.tail,
            other.padding == self.padding,
            other.indexes == self.indexes
        ])

    def __ne__(self, other):
        '''Return whether *other* collection is not equal.'''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result

        return not result

    def __gt__(self, other):
        '''Return whether *other* collection is greater than.'''
        if not isinstance(other, Collection):
            return NotImplemented

        a = (self.head, self.tail, self.padding, len(self.indexes))
        b = (other.head, other.tail, other.padding, len(other.indexes))

        return a > b

    def __lt__(self, other):
        '''Return whether *other* collection is less than.'''
        result = self.__gt__(other)
        if result is NotImplemented:
            return result

        return not result

    def __ge__(self, other):
        '''Return whether *other* collection is greater than or equal.'''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result

        if result is False:
            result = self.__gt__(other)

        return result

    def __le__(self, other):
        '''Return whether *other* collection is less than or equal.'''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result

        if result is False:
            result = self.__lt__(other)

        return result

    def match(self, item):
        '''Return whether *item* matches this collection expression.

        If the match is successful, return data about the match; otherwise
        return None.

        '''
        match = self._expression.match(item)
        if not match:
            return None

        index = match.group('index')
        padded = False
        if match.group('padding'):
            padded = True

        if self.padding == 0:
            if padded:
                return None

        elif len(index) != self.padding:
            return None

        return match
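
    # Editor's illustrative sketch, not upstream code: how match() and the
    # padding rules interact, assuming the collection from the earlier
    # example (head 'frame.', tail '.exr', padding 4).
    #
    # >>> frames.match('frame.0002.exr').group('index')
    # '0002'
    # >>> frames.match('frame.2.exr') is None  # wrong width for padding 4
    # True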

    def add(self, item):
        '''Add *item* to collection.

        Raise :py:class:`~error.CollectionError` if *item* cannot be
        added to the collection.

        '''
        match = self.match(item)
        if match is None:
            raise error.CollectionError(
                'Item does not match collection expression.'
            )

        self.indexes.add(int(match.group('index')))

    def remove(self, item):
        '''Remove *item* from collection.

        Raise :py:class:`~error.CollectionError` if *item* cannot be
        removed from the collection.

        '''
        match = self.match(item)
        if match is None:
            raise error.CollectionError(
                'Item not present in collection.'
            )

        index = int(match.group('index'))
        try:
            self.indexes.remove(index)
        except KeyError:
            raise error.CollectionError(
                'Item not present in collection.'
            )
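
    # Editor's illustrative sketch: add() and remove() only accept items that
    # match the collection expression; anything else raises CollectionError.
    # Values are assumptions for the example.
    #
    # >>> frames.add('frame.0004.exr')
    # >>> frames.remove('frame.0004.exr')
    # >>> frames.add('other.0001.exr')
    # Traceback (most recent call last):
    # CollectionError: Item does not match collection expression.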

    def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
        '''Return string representation as specified by *pattern*.

        Pattern can be any format accepted by Python's standard format
        function and will receive the following keyword arguments as context:

            * *head* - Common leading part of the collection.
            * *tail* - Common trailing part of the collection.
            * *padding* - Padding value in ``%0d`` format.
            * *range* - Total range in the form ``start-end``.
            * *ranges* - Comma separated ranges of indexes.
            * *holes* - Comma separated ranges of missing indexes.

        '''
        data = {}
        data['head'] = self.head
        data['tail'] = self.tail

        if self.padding:
            data['padding'] = '%0{0}d'.format(self.padding)
        else:
            data['padding'] = '%d'

        if '{holes}' in pattern:
            data['holes'] = self.holes().format('{ranges}')

        if '{range}' in pattern or '{ranges}' in pattern:
            indexes = list(self.indexes)
            indexes_count = len(indexes)

            if indexes_count == 0:
                data['range'] = ''

            elif indexes_count == 1:
                data['range'] = '{0}'.format(indexes[0])

            else:
                data['range'] = '{0}-{1}'.format(
                    indexes[0], indexes[-1]
                )

        if '{ranges}' in pattern:
            separated = self.separate()
            if len(separated) > 1:
                ranges = [collection.format('{range}')
                          for collection in separated]

            else:
                ranges = [data['range']]

            data['ranges'] = ', '.join(ranges)

        return pattern.format(**data)
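
    # Editor's illustrative sketch: format() with the default pattern, given
    # a collection holding indexes 1-3 and 7 at padding 4. Values are
    # assumptions for the example.
    #
    # >>> frames = Collection('frame.', '.exr', 4, indexes=set([1, 2, 3, 7]))
    # >>> frames.format()
    # 'frame.%04d.exr [1-3, 7]'
    # >>> frames.format('{head}{padding}{tail} [{ranges}] holes: {holes}')
    # 'frame.%04d.exr [1-3, 7] holes: 4-6'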

    def is_contiguous(self):
        '''Return whether entire collection is contiguous.'''
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                return False

            previous = index

        return True
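
    # Editor's illustrative sketch: contiguity check on the same example
    # collection, which is missing indexes 4-6.
    #
    # >>> frames.is_contiguous()
    # False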

    def holes(self):
        '''Return holes in collection.

        Return :py:class:`~collection.Collection` of missing indexes.

        '''
        missing = set([])
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                missing.update(range(previous + 1, index))

            previous = index

        return Collection(self.head, self.tail, self.padding, indexes=missing)
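
    # Editor's illustrative sketch: holes() returns the gap 4-6 as a new
    # collection with the same head, tail and padding.
    #
    # >>> frames.holes().format('{ranges}')
    # '4-6'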

    def is_compatible(self, collection):
        '''Return whether *collection* is compatible with this collection.

        To be compatible *collection* must have the same head, tail and
        padding properties as this collection.

        '''
        return all([
            isinstance(collection, Collection),
            collection.head == self.head,
            collection.tail == self.tail,
            collection.padding == self.padding
        ])

    def merge(self, collection):
        '''Merge *collection* into this collection.

        If the *collection* is compatible with this collection then update
        indexes with all indexes in *collection*.

        Raise :py:class:`~error.CollectionError` if *collection* is not
        compatible with this collection.

        '''
        if not self.is_compatible(collection):
            raise error.CollectionError('Collection is not compatible '
                                        'with this collection.')

        self.indexes.update(collection.indexes)
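
    # Editor's illustrative sketch: merging two compatible collections
    # unions their indexes in place. Values are assumptions for the example.
    #
    # >>> a = Collection('frame.', '.exr', 4, indexes=set([1, 2]))
    # >>> b = Collection('frame.', '.exr', 4, indexes=set([3]))
    # >>> a.merge(b)
    # >>> sorted(a.indexes)
    # [1, 2, 3]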

    def separate(self):
        '''Return contiguous parts of collection as separate collections.

        Return as list of :py:class:`~collection.Collection` instances.

        '''
        collections = []
        start = None
        end = None

        for index in self.indexes:
            if start is None:
                start = index
                end = start
                continue

            if index != (end + 1):
                collections.append(
                    Collection(self.head, self.tail, self.padding,
                               indexes=set(range(start, end + 1)))
                )
                start = index

            end = index

        if start is None:
            collections.append(
                Collection(self.head, self.tail, self.padding)
            )
        else:
            collections.append(
                Collection(self.head, self.tail, self.padding,
                           indexes=range(start, end + 1))
            )

        return collections
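
    # Editor's illustrative sketch: separating the example 1-3, 7 collection
    # yields one collection per contiguous run.
    #
    # >>> [str(part) for part in frames.separate()]
    # ['frame.%04d.exr [1-3]', 'frame.%04d.exr [7]']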

config/vendor/clique/descriptor.py (vendored, new file, 43 lines)
@ -0,0 +1,43 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.


class Unsettable(object):
    '''Prevent standard setting of property.

    Example::

        >>> class Foo(object):
        ...
        ...     x = Unsettable('x')
        ...
        ...     def __init__(self):
        ...         self.__dict__['x'] = True
        ...
        >>> foo = Foo()
        >>> print(foo.x)
        True
        >>> foo.x = False
        AttributeError: Cannot set attribute defined as unsettable.

    '''

    def __init__(self, label):
        '''Initialise descriptor with property *label*.

        *label* should match the name of the property being described::

            x = Unsettable('x')

        '''
        self.label = label
        super(Unsettable, self).__init__()

    def __get__(self, instance, owner):
        '''Return value of property for *instance*.'''
        return instance.__dict__.get(self.label)

    def __set__(self, instance, value):
        '''Set *value* for *instance* property.'''
        raise AttributeError('Cannot set attribute defined as unsettable.')

config/vendor/clique/error.py (vendored, new file, 10 lines)
@ -0,0 +1,10 @@
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.

'''Custom error classes.'''


class CollectionError(Exception):
    '''Raise when a collection error occurs.'''
62
config/vendor/clique/sorted_set.py
vendored
Normal file
62
config/vendor/clique/sorted_set.py
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.

import bisect

try:
    # Python 3.3+ exposes the ABCs under collections.abc; fall back for
    # older interpreters.
    from collections.abc import MutableSet
except ImportError:
    from collections import MutableSet


class SortedSet(MutableSet):
    '''Maintain sorted collection of unique items.'''

    def __init__(self, iterable=None):
        '''Initialise with items from *iterable*.'''
        super(SortedSet, self).__init__()
        self._members = []
        if iterable:
            self.update(iterable)

    def __str__(self):
        '''Return string representation.'''
        return str(self._members)

    def __repr__(self):
        '''Return representation.'''
        return '<{0} "{1}">'.format(self.__class__.__name__, self)

    def __contains__(self, item):
        '''Return whether *item* is present.'''
        return self._index(item) >= 0

    def __len__(self):
        '''Return number of items.'''
        return len(self._members)

    def __iter__(self):
        '''Return iterator over items.'''
        return iter(self._members)

    def add(self, item):
        '''Add *item*.'''
        if item not in self:
            index = bisect.bisect_right(self._members, item)
            self._members.insert(index, item)

    def discard(self, item):
        '''Remove *item*.'''
        index = self._index(item)
        if index >= 0:
            del self._members[index]

    def update(self, iterable):
        '''Update items with those from *iterable*.'''
        for item in iterable:
            self.add(item)

    def _index(self, item):
        '''Return index of *item* in member list or -1 if not present.'''
        index = bisect.bisect_left(self._members, item)
        if index != len(self) and self._members[index] == item:
            return index

        return -1
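
    # Editor's illustrative sketch: SortedSet keeps members unique and
    # ordered on insertion. Values are assumptions for the example.
    #
    # >>> items = SortedSet([5, 1, 3, 1])
    # >>> list(items)
    # [1, 3, 5]
    # >>> items.add(2)
    # >>> list(items)
    # [1, 2, 3, 5]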