From 84b6efbbf6d95b2f5f137a1b075646d18d2e3100 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 15 Nov 2018 18:03:04 +0100 Subject: [PATCH 01/68] nuke menu.py autostart workfiles --- setup/nuke/nuke_path/menu.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py index 043d4fd7b9..20300b6c17 100644 --- a/setup/nuke/nuke_path/menu.py +++ b/setup/nuke/nuke_path/menu.py @@ -8,6 +8,7 @@ for each in nuke.allNodes(): each['file'].setValue(re.sub('[vV]\d+', rootVersion, each['file'].value())) ''' nuke.knobDefault('onScriptSave', cmd) + print '\n>>> menu.py: Function for automatic check of version in write nodes is added\n' ffmpeg_cmd = '''if nuke.env['LINUX']: @@ -19,9 +20,9 @@ else: nuke.knobDefault('onScriptLoad', ffmpeg_cmd) -# # run avalon's tool Workfiles -# workfiles = '''from avalon.tools import workfiles -# if nuke.Root().name() == 'Root': -# nuke.scriptClose() -# workfiles.show(os.environ["AVALON_WORKDIR"])''' -# nuke.knobDefault('onCreate', workfiles) +# run avalon's tool Workfiles +workfiles = '''from avalon.tools import workfiles +if nuke.Root().name() == 'Root': + nuke.scriptClose() +workfiles.show(os.environ["AVALON_WORKDIR"])''' +nuke.knobDefault('onCreate', workfiles) From 29325c3fa2774442bd110c6031406205a4f9005d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 15 Nov 2018 19:10:48 +0100 Subject: [PATCH 02/68] let nuke not to run workfiles at start --- setup/nuke/nuke_path/menu.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py index 20300b6c17..a454b8bd66 100644 --- a/setup/nuke/nuke_path/menu.py +++ b/setup/nuke/nuke_path/menu.py @@ -1,3 +1,5 @@ + +from avalon.tools import workfiles import nuke # auto fix version paths in write nodes following root name of script cmd = ''' @@ -20,9 +22,11 @@ else: nuke.knobDefault('onScriptLoad', ffmpeg_cmd) -# run avalon's tool Workfiles -workfiles = '''from avalon.tools import workfiles -if nuke.Root().name() == 'Root': - nuke.scriptClose() -workfiles.show(os.environ["AVALON_WORKDIR"])''' -nuke.knobDefault('onCreate', workfiles) +# # run avalon's tool Workfiles +# workfiles = '''from avalon.tools import workfiles +# if nuke.Root().name() == 'Root': +# nuke.scriptClear() +# workfiles.show(os.environ["AVALON_WORKDIR"])''' +# nuke.knobDefault('onCreate', workfiles) + +# workfiles.show(os.environ["AVALON_WORKDIR"]) From bd128b25de93222f9b4bb209126abedaa71a61de Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 15 Nov 2018 22:54:47 +0100 Subject: [PATCH 03/68] plugins ideas --- pype/plugins/nuke/create/create_backdrop | 2 ++ pype/plugins/nuke/create/create_camera | 3 +++ pype/plugins/nuke/create/create_read_plate | 8 ++++++++ pype/plugins/nuke/create/create_write | 17 +++++++++++++++++ pype/plugins/nuke/load/load_alembic | 0 pype/plugins/nuke/load/load_backdrop | 0 pype/plugins/nuke/load/load_camera_abc | 0 pype/plugins/nuke/load/load_camera_nk | 1 + pype/plugins/nuke/load/load_still | 1 + 9 files changed, 32 insertions(+) create mode 100644 pype/plugins/nuke/create/create_backdrop create mode 100644 pype/plugins/nuke/create/create_camera create mode 100644 pype/plugins/nuke/create/create_read_plate create mode 100644 pype/plugins/nuke/create/create_write create mode 100644 pype/plugins/nuke/load/load_alembic create mode 100644 pype/plugins/nuke/load/load_backdrop create mode 100644 pype/plugins/nuke/load/load_camera_abc create mode 100644 
pype/plugins/nuke/load/load_camera_nk create mode 100644 pype/plugins/nuke/load/load_still diff --git a/pype/plugins/nuke/create/create_backdrop b/pype/plugins/nuke/create/create_backdrop new file mode 100644 index 0000000000..2cdc222618 --- /dev/null +++ b/pype/plugins/nuke/create/create_backdrop @@ -0,0 +1,2 @@ +# creates backdrop which is published as separate nuke script +# it is versioned by major version diff --git a/pype/plugins/nuke/create/create_camera b/pype/plugins/nuke/create/create_camera new file mode 100644 index 0000000000..0d542b8ad7 --- /dev/null +++ b/pype/plugins/nuke/create/create_camera @@ -0,0 +1,3 @@ +# create vanilla camera if no camera is selected +# if camera is selected then it will convert it into containerized object +# it is major versioned in publish diff --git a/pype/plugins/nuke/create/create_read_plate b/pype/plugins/nuke/create/create_read_plate new file mode 100644 index 0000000000..90a47cb55e --- /dev/null +++ b/pype/plugins/nuke/create/create_read_plate @@ -0,0 +1,8 @@ +# create publishable read node usually used for enabling version tracking +# also useful for sharing across shots or assets + +# if read nodes are selected it will convert them to centainer +# if no read node selected it will create read node and offer browser to shot resource folder + +# type movie > mov or imagesequence +# type still > matpaint .psd, .tif, .png, diff --git a/pype/plugins/nuke/create/create_write b/pype/plugins/nuke/create/create_write new file mode 100644 index 0000000000..dcb132875a --- /dev/null +++ b/pype/plugins/nuke/create/create_write @@ -0,0 +1,17 @@ +# type: render +# if no render type node in script then first is having in name [master] for definition of main script renderer +# colorspace setting from templates +# dataflow setting from templates + +# type: mask_render +# created with shuffle gizmo for RGB separation into davinci matte +# colorspace setting from templates +# dataflow setting from templates + +# type: prerender +# backdrop with write and read +# colorspace setting from templates +# dataflow setting from templates + +# type: geo +# dataflow setting from templates diff --git a/pype/plugins/nuke/load/load_alembic b/pype/plugins/nuke/load/load_alembic new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_backdrop b/pype/plugins/nuke/load/load_backdrop new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_camera_abc b/pype/plugins/nuke/load/load_camera_abc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_camera_nk b/pype/plugins/nuke/load/load_camera_nk new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/pype/plugins/nuke/load/load_camera_nk @@ -0,0 +1 @@ + diff --git a/pype/plugins/nuke/load/load_still b/pype/plugins/nuke/load/load_still new file mode 100644 index 0000000000..c2aa061c5a --- /dev/null +++ b/pype/plugins/nuke/load/load_still @@ -0,0 +1 @@ +# usually used for mattepainting From 539a010ce8ebebdbe1e3eb323d2cab9b34fda673 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 15 Nov 2018 22:55:20 +0100 Subject: [PATCH 04/68] adding plugins from maya and old repository for wip --- .../nuke/publish/collect_nuke_writes.py | 198 +++++++++++++ .../publish/extract_nuke_baked_colorspace.py | 107 +++++++ .../nuke/publish/extract_nuke_write.py | 116 ++++++++ pype/plugins/nuke/publish/submit_deadline.py | 264 ++++++++++++++++++ .../publish/validate_deadline_connection.py | 27 ++ 5 files changed, 712 insertions(+) 
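A note on the mechanism used by patches 01 and 02 above: menu.py stores snippets of Python source as default values of the root callback knobs through nuke.knobDefault('onScriptSave', ...), 'onScriptLoad' and 'onCreate', so the stored snippet runs when the matching script event fires; patch 02 then backs the automatic Workfiles launch out again in favour of a plain top-level import. The same version-fixing behaviour can also be written against Nuke's callback API instead of knob defaults. A minimal sketch, assuming the vNNN token lives in the script name (the hunk does not show where the original defines rootVersion):

    import re

    import nuke


    def fix_write_versions():
        """Keep Write-node file paths on the same vNNN token as the script."""
        match = re.search(r'[vV]\d+', nuke.root().name())
        if not match:
            return
        root_version = match.group()
        for node in nuke.allNodes('Write'):
            node['file'].setValue(
                re.sub(r'[vV]\d+', root_version, node['file'].value()))


    # Run the fix on every script save, mirroring the 'onScriptSave' hook.
    nuke.addOnScriptSave(fix_write_versions)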
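The create_* and load_* files added by patch 03 are design notes rather than code. In an avalon pipeline each note would typically become a subclass of avalon.api.Creator (or api.Loader for the load_* stubs). A minimal sketch of how the backdrop idea could be fleshed out; the class, family and default names here are assumptions, not the project's final API:

    from avalon import api

    import nuke


    class CreateBackdrop(api.Creator):
        """Backdrop published as a separate nuke script, major-versioned."""

        name = "backdropDefault"
        label = "Backdrop"
        family = "backdrop"

        def process(self):
            # Group the current selection under a backdrop and tag it so
            # the publish collectors can find it later.
            node = nuke.createNode("BackdropNode")
            node["label"].setValue(self.name)
            return node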
create mode 100644 pype/plugins/nuke/publish/collect_nuke_writes.py create mode 100644 pype/plugins/nuke/publish/extract_nuke_baked_colorspace.py create mode 100644 pype/plugins/nuke/publish/extract_nuke_write.py create mode 100644 pype/plugins/nuke/publish/submit_deadline.py create mode 100644 pype/plugins/nuke/publish/validate_deadline_connection.py diff --git a/pype/plugins/nuke/publish/collect_nuke_writes.py b/pype/plugins/nuke/publish/collect_nuke_writes.py new file mode 100644 index 0000000000..7f301803fb --- /dev/null +++ b/pype/plugins/nuke/publish/collect_nuke_writes.py @@ -0,0 +1,198 @@ +import os + +import nuke +import pyblish.api +import clique +import ft_utils +reload(ft_utils) + +global pre_name +pre_name = ft_utils.get_paths_from_template(['shot.vfx.prerender'], + False)[0].split('_')[0] + + +class CollectNukeWrites(pyblish.api.ContextPlugin): + """Collect all write nodes.""" + + order = pyblish.api.CollectorOrder + label = "Writes" + hosts = ["nuke", "nukeassist"] + + # targets = ["default", "process"] + + def process(self, context): + + instances = [] + # creating instances per write node + for node in nuke.allNodes(): + if node.Class() != "Write": + continue + + # Determine output type + output_type = "img" + if node["file_type"].value() == "mov": + output_type = "mov" + + # Create instance + instance = pyblish.api.Instance(node.name()) + instance.data["family"] = output_type + instance.add(node) + instance.data["label"] = node.name() + + instance.data["publish"] = False + + # Get frame range + start_frame = int(nuke.root()["first_frame"].getValue()) + end_frame = int(nuke.root()["last_frame"].getValue()) + if node["use_limit"].getValue(): + start_frame = int(node["first"].getValue()) + end_frame = int(node["last"].getValue()) + print "writeNode collected: {}".format(node.name()) + # Add collection + collection = None + try: + path = "" + if pre_name in node.name(): + path = ft_utils.convert_hashes_in_file_name( + node['prerender_path'].getText()) + else: + path = nuke.filename(node) + path += " [{0}-{1}]".format(start_frame, end_frame) + collection = clique.parse(path) + ################################################### + '''possible place to start create mov publish write collection''' + ################################################### + except ValueError: + # Ignore the exception when the path does not match the + # collection. 
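                # For context (hypothetical path): clique only parses file
                # patterns that carry a padding token plus an explicit range:
                #     clique.parse("/renders/beauty.%04d.exr [1001-1100]")
                # yields a Collection whose .indexes span 1001-1100, while a
                # single-file path such as "shot.mov [1001-1100]" has no
                # padding token, so parse() raises the ValueError that the
                # `pass` below swallows for non-sequence writes.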
+ pass + + instance.data["collection"] = collection + + instances.append(instance) + + context.data["write_instances"] = instances + + context.data["instances"] = ( + context.data.get("instances", []) + instances) + + +class CollectNukeWritesProcess(pyblish.api.ContextPlugin): + """Collect all local processing write instances.""" + + order = CollectNukeWrites.order + 0.01 + label = "Writes Local" + hosts = ["nuke"] + + # targets = ["process.local"] + + def process(self, context): + + for item in context.data["write_instances"]: + instance = context.create_instance(item.data["name"]) + for key, value in item.data.iteritems(): + instance.data[key] = value + + if pre_name not in item.data["name"]: + instance.data["label"] += " - write - local" + instance.data["families"] = ["write", "local"] + else: + instance.data["label"] += " - prerender - local" + instance.data["families"] = ["prerender", "local"] + + for node in item: + instance.add(node) + + # Adding/Checking publish attribute + if "process_local" not in node.knobs(): + knob = nuke.Boolean_Knob("process_local", "Process Local") + knob.setValue(False) + node.addKnob(knob) + + value = bool(node["process_local"].getValue()) + + # Compare against selection + selection = instance.context.data.get("selection", []) + if selection: + if list(set(instance) & set(selection)): + value = True + else: + value = False + + instance.data["publish"] = value + + def instanceToggled(instance, value): + instance[0]["process_local"].setValue(value) + + instance.data["instanceToggled"] = instanceToggled + + +class CollectNukeWritesPublish(pyblish.api.ContextPlugin): + """Collect all write instances for publishing.""" + + order = CollectNukeWrites.order + 0.01 + label = "Writes" + hosts = ["nuke", "nukeassist"] + + # targets = ["default"] + + def process(self, context): + + for item in context.data["write_instances"]: + + # If the collection was not generated. + if not item.data["collection"]: + continue + + missing_files = [] + for f in item.data["collection"]: + # print f + if not os.path.exists(f): + missing_files.append(f) + + for f in missing_files: + item.data["collection"].remove(f) + + if not list(item.data["collection"]): + continue + + instance = context.create_instance(item.data["name"]) + + for key, value in item.data.iteritems(): + # print key, value + instance.data[key] = value + + instance.data["families"] = ["output"] + instance.data["label"] += ( + " - " + os.path.basename(instance.data["collection"].format())) + + for node in item: + instance.add(node) + + # Adding/Checking publish attribute + if "publish" not in node.knobs(): + knob = nuke.Boolean_Knob("publish", "Publish") + knob.setValue(False) + node.addKnob(knob) + + value = bool(node["publish"].getValue()) + + # Compare against selection + selection = instance.context.data.get("selection", []) + if selection: + if list(set(instance) & set(selection)): + value = True + else: + value = False + + instance.data["publish"] = value + + def instanceToggled(instance, value): + # Removing and adding the knob to support NukeAssist, where + # you can't modify the knob value directly. 
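            # pyblish front ends that support per-instance checkboxes look
            # up "instanceToggled" on instance.data and invoke it as
            # callback(instance, new_value) when the artist flips the
            # toggle, so this handler writes the UI state back to the node.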
+ instance[0].removeKnob(instance[0]["publish"]) + knob = nuke.Boolean_Knob("publish", "Publish") + knob.setValue(value) + instance[0].addKnob(knob) + + instance.data["instanceToggled"] = instanceToggled diff --git a/pype/plugins/nuke/publish/extract_nuke_baked_colorspace.py b/pype/plugins/nuke/publish/extract_nuke_baked_colorspace.py new file mode 100644 index 0000000000..f2561bd7c5 --- /dev/null +++ b/pype/plugins/nuke/publish/extract_nuke_baked_colorspace.py @@ -0,0 +1,107 @@ +import os +import tempfile +import shutil + +import nuke + +import pyblish.api + + +class ExtractNukeBakedColorspace(pyblish.api.InstancePlugin): + """Extracts movie with baked in luts + + V:\Remote Apps\ffmpeg\bin>ffmpeg -y -i + V:/FUGA/VFX_OUT/VFX_070010/v02/VFX_070010_comp_v02._baked.mov + -pix_fmt yuv420p + -crf 18 + -timecode 00:00:00:01 + V:/FUGA/VFX_OUT/VFX_070010/v02/VFX_070010_comp_v02..mov + + """ + + order = pyblish.api.ExtractorOrder + label = "Baked Colorspace" + optional = True + families = ["review"] + hosts = ["nuke"] + + def process(self, instance): + + if "collection" not in instance.data.keys(): + return + + # Store selection + selection = [i for i in nuke.allNodes() if i["selected"].getValue()] + + # Deselect all nodes to prevent external connections + [i["selected"].setValue(False) for i in nuke.allNodes()] + + temporary_nodes = [] + + # Create nodes + first_frame = min(instance.data["collection"].indexes) + last_frame = max(instance.data["collection"].indexes) + + temp_dir = tempfile.mkdtemp() + for f in instance.data["collection"]: + shutil.copy(f, os.path.join(temp_dir, os.path.basename(f))) + + node = previous_node = nuke.createNode("Read") + node["file"].setValue( + os.path.join(temp_dir, + os.path.basename(instance.data["collection"].format( + "{head}{padding}{tail}"))).replace("\\", "/")) + + node["first"].setValue(first_frame) + node["origfirst"].setValue(first_frame) + node["last"].setValue(last_frame) + node["origlast"].setValue(last_frame) + temporary_nodes.append(node) + + reformat_node = nuke.createNode("Reformat") + reformat_node["format"].setValue("HD_1080") + reformat_node["resize"].setValue("fit") + reformat_node["filter"].setValue("Lanczos6") + reformat_node["black_outside"].setValue(True) + reformat_node.setInput(0, previous_node) + previous_node = reformat_node + temporary_nodes.append(reformat_node) + + viewer_process_node = nuke.ViewerProcess.node() + dag_node = None + if viewer_process_node: + dag_node = nuke.createNode(viewer_process_node.Class()) + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) + # Copy viewer process values + excludedKnobs = ["name", "xpos", "ypos"] + for item in viewer_process_node.knobs().keys(): + if item not in excludedKnobs and item in dag_node.knobs(): + x1 = viewer_process_node[item] + x2 = dag_node[item] + x2.fromScript(x1.toScript(False)) + else: + self.log.warning("No viewer node found.") + + write_node = nuke.createNode("Write") + path = instance.data["collection"].format("{head}_baked.mov") + instance.data["baked_colorspace_movie"] = path + write_node["file"].setValue(path.replace("\\", "/")) + write_node["file_type"].setValue("mov") + write_node["raw"].setValue(1) + write_node.setInput(0, previous_node) + temporary_nodes.append(write_node) + + # Render frames + nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + + # Clean up + for node in temporary_nodes: + nuke.delete(node) + + shutil.rmtree(temp_dir) + + # Restore selection + [i["selected"].setValue(False) for i in 
nuke.allNodes()] + [i["selected"].setValue(True) for i in selection] diff --git a/pype/plugins/nuke/publish/extract_nuke_write.py b/pype/plugins/nuke/publish/extract_nuke_write.py new file mode 100644 index 0000000000..155b5cf56d --- /dev/null +++ b/pype/plugins/nuke/publish/extract_nuke_write.py @@ -0,0 +1,116 @@ +import os + +import nuke +import pyblish.api + + +class Extract(pyblish.api.InstancePlugin): + """Super class for write and writegeo extractors.""" + + order = pyblish.api.ExtractorOrder + optional = True + label = "Extract Nuke [super]" + hosts = ["nuke"] + match = pyblish.api.Subset + + # targets = ["process.local"] + + def execute(self, instance): + # Get frame range + node = instance[0] + first_frame = nuke.root()["first_frame"].value() + last_frame = nuke.root()["last_frame"].value() + + if node["use_limit"].value(): + first_frame = node["first"].value() + last_frame = node["last"].value() + + # Render frames + nuke.execute(node.name(), int(first_frame), int(last_frame)) + + +class ExtractNukeWrite(Extract): + """ Extract output from write nodes. """ + + families = ["write", "local"] + label = "Extract Write" + + def process(self, instance): + + self.execute(instance) + + # Validate output + for filename in list(instance.data["collection"]): + if not os.path.exists(filename): + instance.data["collection"].remove(filename) + self.log.warning("\"{0}\" didn't render.".format(filename)) + + +class ExtractNukeCache(Extract): + + label = "Cache" + families = ["cache", "local"] + + def process(self, instance): + + self.execute(instance) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg + + +class ExtractNukeCamera(Extract): + + label = "Camera" + families = ["camera", "local"] + + def process(self, instance): + + node = instance[0] + node["writeGeometries"].setValue(False) + node["writePointClouds"].setValue(False) + node["writeAxes"].setValue(False) + + file_path = node["file"].getValue() + node["file"].setValue(instance.data["output_path"]) + + self.execute(instance) + + node["writeGeometries"].setValue(True) + node["writePointClouds"].setValue(True) + node["writeAxes"].setValue(True) + + node["file"].setValue(file_path) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg + + +class ExtractNukeGeometry(Extract): + + label = "Geometry" + families = ["geometry", "local"] + + def process(self, instance): + + node = instance[0] + node["writeCameras"].setValue(False) + node["writePointClouds"].setValue(False) + node["writeAxes"].setValue(False) + + file_path = node["file"].getValue() + node["file"].setValue(instance.data["output_path"]) + + self.execute(instance) + + node["writeCameras"].setValue(True) + node["writePointClouds"].setValue(True) + node["writeAxes"].setValue(True) + + node["file"].setValue(file_path) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg diff --git a/pype/plugins/nuke/publish/submit_deadline.py b/pype/plugins/nuke/publish/submit_deadline.py new file mode 100644 index 0000000000..4dabf4837e --- /dev/null +++ b/pype/plugins/nuke/publish/submit_deadline.py @@ -0,0 +1,264 @@ +import os +import json +import getpass + +from maya import cmds + +from avalon import api +from avalon.vendor import requests + +import pyblish.api + +import pype.maya.lib as lib + + +def 
get_renderer_variables(renderlayer=None): + """Retrieve the extension which has been set in the VRay settings + + Will return None if the current renderer is not VRay + For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which + start with `rs`. Use the actual node name, do NOT use the `nice name` + + Args: + renderlayer (str): the node name of the renderlayer. + + Returns: + dict + """ + + renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer()) + render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"]) + + padding = cmds.getAttr("{}.{}".format(render_attrs["node"], + render_attrs["padding"])) + + filename_0 = cmds.renderSettings(fullPath=True, firstImageName=True)[0] + + if renderer == "vray": + # Maya's renderSettings function does not return V-Ray file extension + # so we get the extension from vraySettings + extension = cmds.getAttr("vraySettings.imageFormatStr") + + # When V-Ray image format has not been switched once from default .png + # the getAttr command above returns None. As such we explicitly set + # it to `.png` + if extension is None: + extension = "png" + + filename_prefix = "/_/" + else: + # Get the extension, getAttr defaultRenderGlobals.imageFormat + # returns an index number. + filename_base = os.path.basename(filename_0) + extension = os.path.splitext(filename_base)[-1].strip(".") + filename_prefix = "/_/" + + return {"ext": extension, + "filename_prefix": filename_prefix, + "padding": padding, + "filename_0": filename_0} + + +def preview_fname(folder, scene, layer, padding, ext): + """Return output file path with #### for padding. + + Deadline requires the path to be formatted with # in place of numbers. + For example `/path/to/render.####.png` + + Args: + folder (str): The root output folder (image path) + scene (str): The scene name + layer (str): The layer name to be rendered + padding (int): The padding length + ext(str): The output file extension + + Returns: + str + + """ + + # Following hardcoded "/_/" + output = "{scene}/{scene}_{layer}/{layer}.{number}.{ext}".format( + scene=scene, + layer=layer, + number="#" * padding, + ext=ext + ) + + return os.path.join(folder, output) + + +class MayaSubmitDeadline(pyblish.api.InstancePlugin): + """Submit available render layers to Deadline + + Renders are submitted to a Deadline Web Service as + supplied via the environment variable AVALON_DEADLINE + + """ + + label = "Submit to Deadline" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["renderlayer"] + + def process(self, instance): + + AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + "http://localhost:8082") + assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" + + context = instance.context + workspace = context.data["workspaceDir"] + filepath = context.data["currentFile"] + filename = os.path.basename(filepath) + comment = context.data.get("comment", "") + scene = os.path.splitext(filename)[0] + dirname = os.path.join(workspace, "renders") + renderlayer = instance.data['setMembers'] # rs_beauty + renderlayer_name = instance.data['subset'] # beauty + renderlayer_globals = instance.data["renderGlobals"] + legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] + deadline_user = context.data.get("deadlineUser", getpass.getuser()) + jobname = "%s - %s" % (filename, instance.name) + + # Get the variables depending on the renderer + render_variables = get_renderer_variables(renderlayer) + output_filename_0 = preview_fname(folder=dirname, + scene=scene, + layer=renderlayer_name, + 
padding=render_variables["padding"], + ext=render_variables["ext"]) + + try: + # Ensure render folder exists + os.makedirs(dirname) + except OSError: + pass + + # Documentation for keys available at: + # https://docs.thinkboxsoftware.com + # /products/deadline/8.0/1_User%20Manual/manual + # /manual-submission.html#job-info-file-options + payload = { + "JobInfo": { + # Top-level group name + "BatchName": filename, + + # Job name, as seen in Monitor + "Name": jobname, + + # Arbitrary username, for visualisation in Monitor + "UserName": deadline_user, + + "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"), + "Frames": "{start}-{end}x{step}".format( + start=int(instance.data["startFrame"]), + end=int(instance.data["endFrame"]), + step=int(instance.data["byFrameStep"]), + ), + + "Comment": comment, + + # Optional, enable double-click to preview rendered + # frames from Deadline Monitor + "OutputFilename0": output_filename_0.replace("\\", "/"), + }, + "PluginInfo": { + # Input + "SceneFile": filepath, + + # Output directory and filename + "OutputFilePath": dirname.replace("\\", "/"), + "OutputFilePrefix": render_variables["filename_prefix"], + + # Mandatory for Deadline + "Version": cmds.about(version=True), + + # Only render layers are considered renderable in this pipeline + "UsingRenderLayers": True, + + # Use legacy Render Layer system + "UseLegacyRenderLayers": legacy_layers, + + # Render only this layer + "RenderLayer": renderlayer, + + # Determine which renderer to use from the file itself + "Renderer": instance.data["renderer"], + + # Resolve relative references + "ProjectPath": workspace, + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Include critical environment variables with submission + keys = [ + # This will trigger `userSetup.py` on the slave + # such that proper initialisation happens the same + # way as it does on a local machine. + # TODO(marcus): This won't work if the slaves don't + # have accesss to these paths, such as if slaves are + # running Linux and the submitter is on Windows. + "PYTHONPATH", + + # todo: This is a temporary fix for yeti variables + "PEREGRINEL_LICENSE", + "REDSHIFT_MAYAEXTENSIONSPATH", + "REDSHIFT_DISABLEOUTPUTLOCKFILES" + "VRAY_FOR_MAYA2018_PLUGINS_X64", + "VRAY_PLUGINS_X64", + "VRAY_USE_THREAD_AFFINITY", + "MAYA_MODULE_PATH" + ] + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **api.Session) + + PATHS = os.environ["PATH"].split(";") + environment["PATH"] = ";".join([p for p in PATHS + if p.startswith("P:")]) + + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + # Include optional render globals + render_globals = instance.data.get("renderGlobals", {}) + payload["JobInfo"].update(render_globals) + + plugin = payload["JobInfo"]["Plugin"] + self.log.info("using render plugin : {}".format(plugin)) + + self.preflight_check(instance) + + self.log.info("Submitting..") + self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + + # E.g. 
http://192.168.0.1:8082/api/jobs + url = "{}/api/jobs".format(AVALON_DEADLINE) + response = requests.post(url, json=payload) + if not response.ok: + raise Exception(response.text) + + # Store output dir for unified publisher (filesequence) + instance.data["outputDir"] = os.path.dirname(output_filename_0) + instance.data["deadlineSubmissionJob"] = response.json() + + def preflight_check(self, instance): + """Ensure the startFrame, endFrame and byFrameStep are integers""" + + for key in ("startFrame", "endFrame", "byFrameStep"): + value = instance.data[key] + + if int(value) == value: + continue + + self.log.warning( + "%f=%d was rounded off to nearest integer" + % (value, int(value)) + ) diff --git a/pype/plugins/nuke/publish/validate_deadline_connection.py b/pype/plugins/nuke/publish/validate_deadline_connection.py new file mode 100644 index 0000000000..53399bfb33 --- /dev/null +++ b/pype/plugins/nuke/publish/validate_deadline_connection.py @@ -0,0 +1,27 @@ +import pyblish.api + +import avalon.api as api +from avalon.vendor import requests + + +class ValidateDeadlineConnection(pyblish.api.ContextPlugin): + """Validate Deadline Web Service is running""" + + label = "Validate Deadline Web Service" + order = pyblish.api.ValidatorOrder + hosts = ["maya"] + families = ["renderlayer"] + + def process(self, instance): + + AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + "http://localhost:8082") + + assert AVALON_DEADLINE is not None, "Requires AVALON_DEADLINE" + + # Check response + response = requests.get(AVALON_DEADLINE) + assert response.ok, "Response must be ok" + assert response.text.startswith("Deadline Web Service "), ( + "Web service did not respond with 'Deadline Web Service'" + ) \ No newline at end of file From 2020986f76624dce9da05cb42f8abf49d8d62828 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 16 Nov 2018 13:44:51 +0100 Subject: [PATCH 05/68] starting on pyblish ftrack implementation --- pype/ftrack/actions/ftrack_action_handler.py | 2 + .../global/publish/collect_ftrack_api.py | 26 + .../global/publish/integrate_ftrack_api.py | 281 ++ pype/vendor/backports/__init__.py | 1 + .../vendor/backports/configparser/__init__.py | 1390 ++++++++++ pype/vendor/backports/configparser/helpers.py | 171 ++ pype/vendor/backports/functools_lru_cache.py | 184 ++ pype/vendor/ftrack_api_27/__init__.py | 32 + .../_centralized_storage_scenario.py | 656 +++++ pype/vendor/ftrack_api_27/_python_ntpath.py | 534 ++++ pype/vendor/ftrack_api_27/_version.py | 1 + .../vendor/ftrack_api_27/accessor/__init__.py | 2 + pype/vendor/ftrack_api_27/accessor/base.py | 124 + pype/vendor/ftrack_api_27/accessor/disk.py | 250 ++ pype/vendor/ftrack_api_27/accessor/server.py | 240 ++ pype/vendor/ftrack_api_27/attribute.py | 697 +++++ pype/vendor/ftrack_api_27/cache.py | 579 ++++ pype/vendor/ftrack_api_27/collection.py | 507 ++++ pype/vendor/ftrack_api_27/data.py | 119 + pype/vendor/ftrack_api_27/entity/__init__.py | 2 + .../ftrack_api_27/entity/asset_version.py | 91 + pype/vendor/ftrack_api_27/entity/base.py | 402 +++ pype/vendor/ftrack_api_27/entity/component.py | 74 + pype/vendor/ftrack_api_27/entity/factory.py | 431 +++ pype/vendor/ftrack_api_27/entity/job.py | 48 + pype/vendor/ftrack_api_27/entity/location.py | 732 +++++ pype/vendor/ftrack_api_27/entity/note.py | 68 + .../ftrack_api_27/entity/project_schema.py | 94 + pype/vendor/ftrack_api_27/entity/user.py | 123 + pype/vendor/ftrack_api_27/event/__init__.py | 2 + pype/vendor/ftrack_api_27/event/base.py | 85 + pype/vendor/ftrack_api_27/event/expression.py | 
281 ++ pype/vendor/ftrack_api_27/event/hub.py | 1053 ++++++++ pype/vendor/ftrack_api_27/event/subscriber.py | 27 + .../ftrack_api_27/event/subscription.py | 23 + pype/vendor/ftrack_api_27/exception.py | 392 +++ pype/vendor/ftrack_api_27/formatter.py | 131 + pype/vendor/ftrack_api_27/inspection.py | 135 + pype/vendor/ftrack_api_27/logging.py | 26 + pype/vendor/ftrack_api_27/operation.py | 115 + pype/vendor/ftrack_api_27/plugin.py | 121 + pype/vendor/ftrack_api_27/query.py | 202 ++ .../__init__.py | 2 + .../resource_identifier_transformer/base.py | 50 + pype/vendor/ftrack_api_27/session.py | 2397 +++++++++++++++++ .../ftrack_api_27/structure/__init__.py | 2 + pype/vendor/ftrack_api_27/structure/base.py | 38 + .../ftrack_api_27/structure/entity_id.py | 12 + pype/vendor/ftrack_api_27/structure/id.py | 91 + pype/vendor/ftrack_api_27/structure/origin.py | 28 + .../ftrack_api_27/structure/standard.py | 217 ++ pype/vendor/ftrack_api_27/symbol.py | 75 + pype/vendor/mtoa_3.1.1.toml | 21 + pype/vendor/mtoa_30.1.1.toml | 21 + 54 files changed, 13408 insertions(+) create mode 100644 pype/plugins/global/publish/collect_ftrack_api.py create mode 100644 pype/plugins/global/publish/integrate_ftrack_api.py create mode 100644 pype/vendor/backports/__init__.py create mode 100644 pype/vendor/backports/configparser/__init__.py create mode 100644 pype/vendor/backports/configparser/helpers.py create mode 100644 pype/vendor/backports/functools_lru_cache.py create mode 100644 pype/vendor/ftrack_api_27/__init__.py create mode 100644 pype/vendor/ftrack_api_27/_centralized_storage_scenario.py create mode 100644 pype/vendor/ftrack_api_27/_python_ntpath.py create mode 100644 pype/vendor/ftrack_api_27/_version.py create mode 100644 pype/vendor/ftrack_api_27/accessor/__init__.py create mode 100644 pype/vendor/ftrack_api_27/accessor/base.py create mode 100644 pype/vendor/ftrack_api_27/accessor/disk.py create mode 100644 pype/vendor/ftrack_api_27/accessor/server.py create mode 100644 pype/vendor/ftrack_api_27/attribute.py create mode 100644 pype/vendor/ftrack_api_27/cache.py create mode 100644 pype/vendor/ftrack_api_27/collection.py create mode 100644 pype/vendor/ftrack_api_27/data.py create mode 100644 pype/vendor/ftrack_api_27/entity/__init__.py create mode 100644 pype/vendor/ftrack_api_27/entity/asset_version.py create mode 100644 pype/vendor/ftrack_api_27/entity/base.py create mode 100644 pype/vendor/ftrack_api_27/entity/component.py create mode 100644 pype/vendor/ftrack_api_27/entity/factory.py create mode 100644 pype/vendor/ftrack_api_27/entity/job.py create mode 100644 pype/vendor/ftrack_api_27/entity/location.py create mode 100644 pype/vendor/ftrack_api_27/entity/note.py create mode 100644 pype/vendor/ftrack_api_27/entity/project_schema.py create mode 100644 pype/vendor/ftrack_api_27/entity/user.py create mode 100644 pype/vendor/ftrack_api_27/event/__init__.py create mode 100644 pype/vendor/ftrack_api_27/event/base.py create mode 100644 pype/vendor/ftrack_api_27/event/expression.py create mode 100644 pype/vendor/ftrack_api_27/event/hub.py create mode 100644 pype/vendor/ftrack_api_27/event/subscriber.py create mode 100644 pype/vendor/ftrack_api_27/event/subscription.py create mode 100644 pype/vendor/ftrack_api_27/exception.py create mode 100644 pype/vendor/ftrack_api_27/formatter.py create mode 100644 pype/vendor/ftrack_api_27/inspection.py create mode 100644 pype/vendor/ftrack_api_27/logging.py create mode 100644 pype/vendor/ftrack_api_27/operation.py create mode 100644 pype/vendor/ftrack_api_27/plugin.py create 
mode 100644 pype/vendor/ftrack_api_27/query.py create mode 100644 pype/vendor/ftrack_api_27/resource_identifier_transformer/__init__.py create mode 100644 pype/vendor/ftrack_api_27/resource_identifier_transformer/base.py create mode 100644 pype/vendor/ftrack_api_27/session.py create mode 100644 pype/vendor/ftrack_api_27/structure/__init__.py create mode 100644 pype/vendor/ftrack_api_27/structure/base.py create mode 100644 pype/vendor/ftrack_api_27/structure/entity_id.py create mode 100644 pype/vendor/ftrack_api_27/structure/id.py create mode 100644 pype/vendor/ftrack_api_27/structure/origin.py create mode 100644 pype/vendor/ftrack_api_27/structure/standard.py create mode 100644 pype/vendor/ftrack_api_27/symbol.py create mode 100644 pype/vendor/mtoa_3.1.1.toml create mode 100644 pype/vendor/mtoa_30.1.1.toml diff --git a/pype/ftrack/actions/ftrack_action_handler.py b/pype/ftrack/actions/ftrack_action_handler.py index 31afc79e7e..47b55c2112 100644 --- a/pype/ftrack/actions/ftrack_action_handler.py +++ b/pype/ftrack/actions/ftrack_action_handler.py @@ -240,6 +240,8 @@ class AppAction(object): os.environ["AVALON_APP"] = self.identifier os.environ["AVALON_APP_NAME"] = self.identifier + "_" + self.variant + os.environ["FTRACK_TASKID"] = id + anatomy = t.anatomy io.install() hierarchy = io.find_one({"type": 'asset', "name": entity['parent']['name']})['data']['parents'] diff --git a/pype/plugins/global/publish/collect_ftrack_api.py b/pype/plugins/global/publish/collect_ftrack_api.py new file mode 100644 index 0000000000..87cd085e19 --- /dev/null +++ b/pype/plugins/global/publish/collect_ftrack_api.py @@ -0,0 +1,26 @@ +import os + +import ftrack_api_27 as ftrack_api +import pyblish.api + + +class PyblishFtrackCollectFtrackApi(pyblish.api.ContextPlugin): + """ Collects an ftrack session and the current task id. """ + + order = pyblish.api.CollectorOrder + label = "Ftrack" + + def process(self, context): + + # Collect session + session = ftrack_api.Session() + context.data["ftrackSession"] = session + + # Collect task + taskid = "" + + taskid = os.environ.get("FTRACK_TASKID", "") + + context.data["ftrackTask"] = session.get("Task", taskid) + + self.log.info("collected: {}".format(context.data["ftrackTask"])) diff --git a/pype/plugins/global/publish/integrate_ftrack_api.py b/pype/plugins/global/publish/integrate_ftrack_api.py new file mode 100644 index 0000000000..95f6cffb23 --- /dev/null +++ b/pype/plugins/global/publish/integrate_ftrack_api.py @@ -0,0 +1,281 @@ +import os + +import pyblish.api +from pype.vendor import clique + + +class PyblishFtrackIntegrateFtrackApi(pyblish.api.InstancePlugin): + """ Commit components to server. """ + + order = pyblish.api.IntegratorOrder + label = "Ftrack" + families = ["ftrack"] + + def query(self, entitytype, data): + """ Generate a query expression from data supplied. + + If a value is not a string, we'll add the id of the entity to the + query. + + Args: + entitytype (str): The type of entity to query. + data (dict): The data to identify the entity. + exclusions (list): All keys to exclude from the query. 
+ + Returns: + str: String query to use with "session.query" + """ + queries = [] + for key, value in data.iteritems(): + if not isinstance(value, (basestring, int)): + if "id" in value.keys(): + queries.append( + "{0}.id is \"{1}\"".format(key, value["id"]) + ) + else: + queries.append("{0} is \"{1}\"".format(key, value)) + + query = ( + "select id from " + entitytype + " where " + " and ".join(queries) + ) + self.log.debug(query) + return query + + def process(self, instance): + + session = instance.context.data["ftrackSession"] + task = instance.context.data["ftrackTask"] + + info_msg = "Created new {entity_type} with data: {data}" + info_msg += ", metadata: {metadata}." + + # Iterate over components and publish + for data in instance.data.get("ftrackComponentsList", []): + + # AssetType + # Get existing entity. + assettype_data = {"short": "upload"} + assettype_data.update(data.get("assettype_data", {})) + + assettype_entity = session.query( + self.query("AssetType", assettype_data) + ).first() + + # Create a new entity if none exits. + if not assettype_entity: + assettype_entity = session.create("AssetType", assettype_data) + self.log.info( + "Created new AssetType with data: ".format(assettype_data) + ) + + # Asset + # Get existing entity. + asset_data = { + "name": task["name"], + "type": assettype_entity, + "parent": task["parent"], + } + asset_data.update(data.get("asset_data", {})) + + asset_entity = session.query( + self.query("Asset", asset_data) + ).first() + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + asset_metadata = asset_data.pop("metadata", {}) + + # Create a new entity if none exits. + if not asset_entity: + asset_entity = session.create("Asset", asset_data) + self.log.info( + info_msg.format( + entity_type="Asset", + data=asset_data, + metadata=asset_metadata + ) + ) + + # Adding metadata + existing_asset_metadata = asset_entity["metadata"] + existing_asset_metadata.update(asset_metadata) + asset_entity["metadata"] = existing_asset_metadata + + # AssetVersion + # Get existing entity. + assetversion_data = { + "version": 0, + "asset": asset_entity, + "task": task + } + assetversion_data.update(data.get("assetversion_data", {})) + + assetversion_entity = session.query( + self.query("AssetVersion", assetversion_data) + ).first() + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + assetversion_metadata = assetversion_data.pop("metadata", {}) + + # Create a new entity if none exits. + if not assetversion_entity: + assetversion_entity = session.create( + "AssetVersion", assetversion_data + ) + self.log.info( + info_msg.format( + entity_type="AssetVersion", + data=assetversion_data, + metadata=assetversion_metadata + ) + ) + + # Adding metadata + existing_assetversion_metadata = assetversion_entity["metadata"] + existing_assetversion_metadata.update(assetversion_metadata) + assetversion_entity["metadata"] = existing_assetversion_metadata + + # Have to commit the version and asset, because location can't + # determine the final location without. + session.commit() + + # Component + # Get existing entity. 
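            # The query() helper above joins these criteria into an ftrack
            # API expression; with hypothetical values it yields e.g.:
            #     query("Component", {"name": "main",
            #                         "version": {"id": "a1b2"}})
            #     -> 'select id from Component where name is "main" and
            #        version.id is "a1b2"'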
+ component_data = { + "name": "main", + "version": assetversion_entity + } + component_data.update(data.get("component_data", {})) + + component_entity = session.query( + self.query("Component", component_data) + ).first() + + component_overwrite = data.get("component_overwrite", False) + location = data.get("component_location", session.pick_location()) + + # Overwrite existing component data if requested. + if component_entity and component_overwrite: + + origin_location = session.query( + "Location where name is \"ftrack.origin\"" + ).one() + + # Removing existing members from location + components = list(component_entity.get("members", [])) + components += [component_entity] + for component in components: + for loc in component["component_locations"]: + if location["id"] == loc["location_id"]: + location.remove_component( + component, recursive=False + ) + + # Deleting existing members on component entity + for member in component_entity.get("members", []): + session.delete(member) + del(member) + + session.commit() + + # Reset members in memory + if "members" in component_entity.keys(): + component_entity["members"] = [] + + # Add components to origin location + try: + collection = clique.parse(data["component_path"]) + except ValueError: + # Assume its a single file + # Changing file type + name, ext = os.path.splitext(data["component_path"]) + component_entity["file_type"] = ext + + origin_location.add_component( + component_entity, data["component_path"] + ) + else: + # Changing file type + component_entity["file_type"] = collection.format("{tail}") + + # Create member components for sequence. + for member_path in collection: + + size = 0 + try: + size = os.path.getsize(member_path) + except OSError: + pass + + name = collection.match(member_path).group("index") + + member_data = { + "name": name, + "container": component_entity, + "size": size, + "file_type": os.path.splitext(member_path)[-1] + } + + component = session.create( + "FileComponent", member_data + ) + origin_location.add_component( + component, member_path, recursive=False + ) + component_entity["members"].append(component) + + # Add components to location. + location.add_component( + component_entity, origin_location, recursive=True + ) + + data["component"] = component + msg = "Overwriting Component with path: {0}, data: {1}, " + msg += "location: {2}" + self.log.info( + msg.format( + data["component_path"], + component_data, + location + ) + ) + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + component_metadata = component_data.pop("metadata", {}) + + # Create new component if none exists. + if not component_entity: + component_entity = assetversion_entity.create_component( + data["component_path"], + data=component_data, + location=location + ) + data["component"] = component_entity + msg = "Created new Component with path: {0}, data: {1}" + msg += ", metadata: {2}, location: {3}" + self.log.info( + msg.format( + data["component_path"], + component_data, + component_metadata, + location + ) + ) + + # Adding metadata + existing_component_metadata = component_entity["metadata"] + existing_component_metadata.update(component_metadata) + component_entity["metadata"] = existing_component_metadata + + # Inform user about no changes to the database. + if component_entity and not component_overwrite: + data["component"] = component_entity + self.log.info( + "Found existing component, and no request to overwrite. 
" + "Nothing has been changed." + ) + else: + # Commit changes. + session.commit() diff --git a/pype/vendor/backports/__init__.py b/pype/vendor/backports/__init__.py new file mode 100644 index 0000000000..69e3be50da --- /dev/null +++ b/pype/vendor/backports/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/pype/vendor/backports/configparser/__init__.py b/pype/vendor/backports/configparser/__init__.py new file mode 100644 index 0000000000..06d7a0855f --- /dev/null +++ b/pype/vendor/backports/configparser/__init__.py @@ -0,0 +1,1390 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. + +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): + Create the parser. When `defaults' is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type' is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters' is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values' is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value' is True (default: False), options without + values are accepted; the value presented for these is None. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the list of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name' attribute, the string `' is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. 
Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars' argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option' is a key in + `vars', the value from `vars' is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters' is True (the default), delimiters + between keys and values are surrounded by spaces. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from collections import MutableMapping +import functools +import io +import itertools +import re +import sys +import warnings + +from backports.configparser.helpers import OrderedDict as _default_dict +from backports.configparser.helpers import ChainMap as _ChainMap +from backports.configparser.helpers import from_none, open, str, PY2 + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. 
+ + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. + """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {0!r} in section {1!r} contains " + "an interpolation key {2!r} which is not a valid option name. " + "Raw value: {3!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {0!r} " + "in section {1!r} contains an interpolation key which " + "cannot be substituted in {2} steps. 
Raw value: {3!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. " + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %r' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None' as +# a valid fallback value. +_UNSET = object() + + +class Interpolation(object): + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. 
Other % usage + is considered a user error and raises `InterpolationSyntaxError'.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise from_none(InterpolationMissingOptionError( + option, section, rawval, var)) + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + `zc.buildout'. 
Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('$'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise from_none(InterpolationMissingOptionError( + option, section, rawval, ":".join(path))) + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class LegacyInterpolation(Interpolation): + """Deprecated interpolation used in old versions of ConfigParser. + Use BasicInterpolation or ExtendedInterpolation instead.""" + + _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") + + def before_get(self, parser, section, option, value, vars): + rawval = value + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if value and "%(" in value: + replace = functools.partial(self._interpolation_replace, + parser=parser) + value = self._KEYCRE.sub(replace, value) + try: + value = value % vars + except KeyError as e: + raise from_none(InterpolationMissingOptionError( + option, section, rawval, e.args[0])) + else: + break + if value and "%(" in value: + raise InterpolationDepthError(option, section, rawval) + return value + + def before_set(self, parser, section, option, value): + return value + + @staticmethod + def _interpolation_replace(match, parser): + s = match.group(1) + if s is None: + return match.group() + else: + return "%%(%s)s" % parser.optionxform(s) + + +class RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
<header>[^]]+)                  # very permissive!
        \]                                 # ]
        """
    _OPT_TMPL = r"""
        (?P